repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
---|---|---|---|---|
halbbob/dff | refs/heads/develop | modules/viewer/hexedit/cursors.py | 1 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009 ArxSys
#
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Jeremy Mounier <[email protected]>
#
from PyQt4.QtCore import QString, Qt, QPointF
from PyQt4.QtGui import QWidget, QFont, QColor, QBrush, QPen, QGraphicsRectItem
class hexCursor(QGraphicsRectItem):
    """Outline-rectangle cursor drawn over the byte grid of the hex panel.

    The cursor is an unfilled QGraphicsRectItem (dark cyan pen) sized to one
    byte cell; update() repositions it from the editor's current selection.
    """
    def __init__(self, whex):
        QGraphicsRectItem.__init__(self)
        self.init(whex)

    def init(self, whex):
        # keep references to the enclosing hex widget and its sub-items
        self.whex = whex
        self.hexitem = self.whex.hexitem
        self.heditor = self.whex.heditor
        # cursor is a square with the width of one byte cell
        self.w = self.hexitem.byteW
        self.h = self.w
        self.xpos = 0
        self.ypos = 0
        # outline only: no fill brush
        self.brush = QBrush(Qt.NoBrush)
        self.pen = QPen(QColor(Qt.darkCyan))
        self.setBrush(self.brush)
        self.setPen(self.pen)
#        self.setParentItem(parent)

    def draw(self, posx, posy):
        # NOTE(review): 95 / 25 look like hard-coded x/y origins of the hex
        # grid inside the scene -- confirm against the hexitem layout code
        x = 95 + (posx * self.hexitem.byteW) + (posx * self.hexitem.charW) + (self.hexitem.startBlank / 2)
        y = 25 + (posy * self.hexitem.byteH) + (self.hexitem.startBlank / 2)
        self.xpos = posx
        self.ypos = posy
        self.setRect(x, y, self.w, self.h)
        self.setVisible(True)

    def update(self):
        # column / row of the selection relative to the first displayed offset
        # (Python 2 integer division assumed for the row computation)
        x = (self.heditor.selection.offset - self.heditor.currentOffset) % self.heditor.bytesPerLine
        y = (self.heditor.selection.offset - self.heditor.currentOffset) / self.heditor.bytesPerLine
        if y >= 0 and y < (self.heditor.readSize / self.heditor.bytesPerLine):
            # selection falls inside the currently displayed page: show cursor
            self.setVisible(True)
            self.xpos = x
            self.ypos = y
            self.draw(x, y)
        else:
            # selection scrolled out of view
            self.setVisible(False)
#        if (self.heditor.selection.offset >= self.heditor.currentOffset) and (self.heditor.selection.offset < (self.heditor.currentOffset + self.heditor.pageSize)):

    # For future implementations (kept from the original author):
#    def moveUp(self, move):
#        if (self.ypos - move) > 0:
#            self.ypos -= move
#            x = 95 + (self.xpos * 20) + (self.xpos * 4)
#            y = 25 + (self.ypos * 15) + (self.ypos * 4)
#            self.setRect(x, y, self.w, self.h)
#        else:
#            self.setVisible(False)
#    def moveDown(self, move):
#        if (self.ypos + move) < 32:
#            self.ypos += move
#            x = 95 + (self.xpos * 20) + (self.xpos * 4)
#            y = 25 + (self.ypos * 15) + (self.ypos * 4)
#            self.setRect(x, y, self.w, self.h)
#        else:
#            self.setVisible(False)
class asciiCursor(QGraphicsRectItem):
    """Outline-rectangle cursor for the ASCII column of the hex editor.

    Mirrors hexCursor but is positioned over the ASCII panel (black pen,
    double-height cell) and offset 390px to the right of the hex grid.
    """
    def __init__(self, whex):
        QGraphicsRectItem.__init__(self)
        self.init(whex)

    def init(self, whex):
        # keep references to the enclosing widget and its sub-items
        self.whex = whex
        self.asciitem = self.whex.asciitem
        self.heditor = self.whex.heditor
        # one character wide, two character-widths tall
        self.w = self.asciitem.byteW
        self.h = self.w * 2
        self.brush = QBrush(Qt.NoBrush)
        self.pen = QPen(QColor(Qt.black))
        self.setBrush(self.brush)
        self.setPen(self.pen)
#        self.setParentItem(whex)

    def draw(self, posx, posy):
        # NOTE(review): 95 + 390 appears to be the hard-coded x origin of the
        # ASCII column -- confirm against the asciitem layout code
        x = 95 + 390 + (posx * self.asciitem.byteW) + (self.asciitem.startBlank / 2)
        y = 25 + (posy * self.asciitem.byteH) + (self.asciitem.startBlank / 2)
        self.xpos = posx
        self.ypos = posy
        self.setRect(x, y, self.w, self.h)
        self.setVisible(True)

    def update(self):
        # column / row of the selection relative to the first displayed offset
        # (Python 2 integer division assumed for the row computation)
        x = (self.heditor.selection.offset - self.heditor.currentOffset) % self.heditor.bytesPerLine
        y = (self.heditor.selection.offset - self.heditor.currentOffset) / self.heditor.bytesPerLine
        if y >= 0 and y < (self.heditor.readSize / self.heditor.bytesPerLine):
            # selection is inside the displayed page
            self.setVisible(True)
            self.ypos = y
            self.xpos = x
            self.draw(x, y)
        else:
            self.setVisible(False)
|
sumitsourabh/opencog | refs/heads/master | opencog/python/learning/bayesian_learning/__init__.py | 281 | __author__ = 'keyvan'
|
basicthinker/THNVM | refs/heads/master | ext/ply/test/calclex.py | 164 | # -----------------------------------------------------------------------------
# calclex.py
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")  # allow running from the ply test directory
import ply.lex as lex

# Token names exported to ply; every name must have a matching t_<NAME> rule.
tokens = (
    'NAME','NUMBER',
    'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
    'LPAREN','RPAREN',
    )

# Tokens: simple tokens are declared as regex strings named t_<TOKEN>.
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
    r'\d+'
    # NOTE: the docstring above is the token regex used by ply -- do not edit.
    # Convert the matched digits to an int; clamp to 0 on overflow-style errors.
    try:
        t.value = int(t.value)
    except ValueError:
        print("Integer value too large %s" % t.value)
        t.value = 0
    return t

# Characters silently skipped between tokens.
t_ignore = " \t"
def t_newline(t):
    r'\n+'
    # Bug fix: advance the *lexer's* line counter.  The original did
    # ``t.lineno += ...`` which only mutated the token object's private copy,
    # so the lexer's line tracking never moved past line 1.
    t.lexer.lineno += t.value.count("\n")
def t_error(t):
    # Error rule: report the offending character and resynchronise the lexer
    # by skipping a single character.  (No docstring on purpose: ply treats
    # rule docstrings as regex patterns.)
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer from the t_* rules defined above in this module.
lex.lex()
|
mthurlin/gevent-MySQL | refs/heads/master | lib/geventmysql/client.py | 1 | # Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
#TODO supporting closing a halfread resultset (e.g. automatically read and discard rest)
import errno
from geventmysql._mysql import Buffer
from geventmysql.mysql import BufferedPacketReader, BufferedPacketWriter, PACKET_READ_RESULT, CAPS, COMMAND
import logging
import time
from gevent import socket
import gevent
import sys
# Maps MySQL character-set names to the collation ids that are sent in the
# login handshake (values taken from the query: SHOW COLLATION).
# Rewritten as a single dict literal: the original was 39 repeated
# assignments and duplicated the "latin1" and "utf8" entries (with the same
# values, so behaviour is unchanged).
charset_map = {
    "big5": 1,
    "dec8": 3,
    "cp850": 4,
    "hp8": 6,
    "koi8r": 7,
    "latin1": 8,
    "latin2": 9,
    "swe7": 10,
    "ascii": 11,
    "ujis": 12,
    "sjis": 13,
    "hebrew": 16,
    "tis620": 18,
    "euckr": 19,
    "koi8u": 22,
    "gb2312": 24,
    "greek": 25,
    "cp1250": 26,
    "gbk": 28,
    "latin5": 30,
    "armscii8": 32,
    "utf8": 33,
    "ucs2": 35,
    "cp866": 36,
    "keybcs2": 37,
    "macce": 38,
    "macroman": 39,
    "cp852": 40,
    "latin7": 41,
    "cp1251": 51,
    "cp1256": 57,
    "cp1257": 59,
    "binary": 63,
    "geostd8": 92,
    "cp932": 95,
    "eucjpms": 97,
}
# SHA-1 factory used by the password scramble; prefer hashlib, fall back to
# the deprecated 'sha' module on old interpreters.
try:
    #python 2.6
    import hashlib
    SHA = hashlib.sha1
except ImportError:
    #python 2.5
    import sha
    SHA = sha.new
#import time
class ClientError(Exception):
    """Base error of the MySQL client; carries the server's message text."""
    @classmethod
    def from_error_packet(cls, packet, skip = 8):
        # skip the numeric error-code/sqlstate header bytes, keep the
        # human-readable message as the exception argument
        packet.skip(skip)
        return cls(packet.read_bytes(packet.remaining))

class ClientLoginError(ClientError): pass        # handshake/authentication failed
class ClientCommandError(ClientError): pass      # server returned an error for a command
class ClientProgrammingError(ClientError): pass  # API misuse by the caller
class ResultSet(object):
    """Represents the current resultset being read from a Connection.
    The resultset implements an iterator over rows. A Resultset must
    be iterated entirely and closed explicitly."""
    # lifecycle states; normal transitions: INIT -> OPEN -> EOF -> CLOSED
    STATE_INIT = 0
    STATE_OPEN = 1
    STATE_EOF = 2
    STATE_CLOSED = 3

    def __init__(self, connection, field_count):
        self.state = self.STATE_INIT
        self.connection = connection
        # column metadata is read up-front; the rows follow on the wire
        self.fields = connection.reader.read_fields(field_count)
        self.state = self.STATE_OPEN

    def __iter__(self):
        # rows are streamed from the connection; reaching the end of the
        # generator marks the set EOF so close() becomes legal
        assert self.state == self.STATE_OPEN, "cannot iterate a resultset when it is not open"
        for row in self.connection.reader.read_rows(self.fields):
            yield row
        self.state = self.STATE_EOF

    def close(self, connection_close = False):
        """Closes the current resultset. Make sure you have iterated over all rows before closing it!"""
        #print 'close on ResultSet', id(self.connection)
        if self.state != self.STATE_EOF and not connection_close:
            raise ClientProgrammingError("you can only close a resultset when it was read entirely!")
        connection = self.connection
        # drop back-references so the connection may start a new command
        del self.connection
        del self.fields
        connection._close_current_resultset(self)
        self.state = self.STATE_CLOSED
class Connection(object):
    """Represents a single connection to a MySQL Database host.

    State machine: INIT -> CONNECTING -> CONNECTED -> CLOSING -> CLOSED,
    with ERROR on unexpected failures.  A connection supports one
    outstanding command at a time and at most one open ResultSet.
    (Python 2 code: byte strings and old-style except syntax throughout.)
    """
    STATE_ERROR = -1
    STATE_INIT = 0
    STATE_CONNECTING = 1
    STATE_CONNECTED = 2
    STATE_CLOSING = 3
    STATE_CLOSED = 4

    def __init__(self):
        self.state = self.STATE_INIT
        # single reusable IO buffer shared by the packet reader and writer
        self.buffer = Buffer(1024 * 16)
        self.socket = None
        self.reader = None
        self.writer = None
        self._time_command = False #whether to keep timing stats on a cmd
        self._command_time = -1
        self._incommand = False
        self.current_resultset = None

    def _scramble(self, password, seed):
        """taken from java jdbc driver, scrambles the password using the given seed
        according to the mysql login protocol"""
        stage1 = SHA(password).digest()
        stage2 = SHA(stage1).digest()
        md = SHA()
        md.update(seed)
        md.update(stage2)
        # xor stage1 with SHA(seed + SHA(SHA(password))), byte by byte
        return ''.join(map(chr, [x ^ ord(stage1[i]) for i, x in enumerate(map(ord, md.digest()))]))

    def _handshake(self, user, password, database, charset):
        """performs the mysql login handshake"""
        #init buffer for reading (both pos and lim = 0)
        self.buffer.clear()
        self.buffer.flip()
        #read server welcome
        packet = self.reader.read_packet()
        self.protocol_version = packet.read_byte() #normally this would be 10 (0xa)
        if self.protocol_version == 0xff:
            #error on initial greeting, possibly too many connection error
            raise ClientLoginError.from_error_packet(packet, skip = 2)
        elif self.protocol_version == 0xa:
            pass #expected
        else:
            assert False, "Unexpected protocol version %02x" % self.protocol_version
        self.server_version = packet.read_bytes_until(0)
        packet.skip(4) #thread_id
        # first 8 bytes of the auth seed; the remainder arrives after the
        # filler block below (4.1+ protocol splits the seed in two)
        scramble_buff = packet.read_bytes(8)
        packet.skip(1) #filler
        server_caps = packet.read_short()
        #CAPS.dbg(server_caps)
        if not server_caps & CAPS.PROTOCOL_41:
            assert False, "<4.1 auth not supported"
        server_language = packet.read_byte()
        server_status = packet.read_short()
        packet.skip(13) #filler
        if packet.remaining:
            scramble_buff += packet.read_bytes_until(0)
        else:
            assert False, "<4.1 auth not supported"
        # start from the server capabilities and strip what we don't support
        client_caps = server_caps
        #always turn off compression
        client_caps &= ~CAPS.COMPRESS
        client_caps &= ~CAPS.NO_SCHEMA
        #always turn off ssl
        client_caps &= ~CAPS.SSL
        if not server_caps & CAPS.CONNECT_WITH_DB and database:
            assert False, "initial db given but not supported by server"
        if server_caps & CAPS.CONNECT_WITH_DB and not database:
            client_caps &= ~CAPS.CONNECT_WITH_DB
        #build and write our answer to the initial handshake packet
        self.writer.clear()
        self.writer.start()
        self.writer.write_int(client_caps)
        self.writer.write_int(1024 * 1024 * 32) #max packet size (32mb; the original comment claimed 16mb)
        if charset:
            self.writer.write_byte(charset_map[charset.replace("-", "")])
        else:
            self.writer.write_byte(server_language)
        self.writer.write_bytes('\0' * 23) #filler
        self.writer.write_bytes(user + '\0')
        if password:
            # length-prefixed 20-byte scrambled password
            self.writer.write_byte(20)
            self.writer.write_bytes(self._scramble(password, scramble_buff))
        else:
            self.writer.write_byte(0)
        if database:
            self.writer.write_bytes(database + '\0')
        self.writer.finish(1)
        self.writer.flush()
        #read final answer from server
        self.buffer.flip()
        packet = self.reader.read_packet()
        result = packet.read_byte()
        if result == 0xff:
            raise ClientLoginError.from_error_packet(packet)
        elif result == 0xfe:
            assert False, "old password handshake not implemented"

    def _close_current_resultset(self, resultset):
        # callback from ResultSet.close: frees the single-resultset slot
        assert resultset == self.current_resultset
        self.current_resultset = None

    def _send_command(self, cmd, cmd_text):
        """sends a command with the given text"""
        #self.log.debug('cmd %s %s', cmd, cmd_text)
        #note: we are not using normal writer.start/finish here, because the cmd
        #could not fit in buffer, causing flushes in write_string, in that case 'finish' would
        #not be able to go back to the header of the packet to write the length in that case
        self.writer.clear()
        self.writer.write_header(len(cmd_text) + 1 + 4, 0) #1 is len of cmd, 4 is len of header, 0 is packet number
        self.writer.write_byte(cmd)
        self.writer.write_bytes(cmd_text)
        self.writer.flush()

    def _close(self):
        """internal close: tears down the resultset and socket unconditionally"""
        #self.log.debug("close mysql client %s", id(self))
        try:
            self.state = self.STATE_CLOSING
            if self.current_resultset:
                self.current_resultset.close(True)
            self.socket.close()
            self.state = self.STATE_CLOSED
        except:
            self.state = self.STATE_ERROR
            raise

    def connect(self, host = "localhost", port = 3306, user = "", password = "", db = "", autocommit = None, charset = None, use_unicode=False):
        """connects to the given host and port with user and password"""
        #self.log.debug("connect mysql client %s %s %s %s %s", id(self), host, port, user, password)
        try:
            #parse addresses of form str <host:port>
            assert type(host) == str, "make sure host is a string"
            if host[0] == '/': #assume unix domain socket
                addr = host
            elif ':' in host:
                host, port = host.split(':')
                port = int(port)
                addr = (host, port)
            else:
                addr = (host, port)

            assert self.state == self.STATE_INIT, "make sure connection is not already connected or closed"
            self.state = self.STATE_CONNECTING
            self.socket = socket.create_connection(addr)
            # reader and writer share self.buffer; see _handshake/_send_command
            self.reader = BufferedPacketReader(self.socket, self.buffer)
            self.writer = BufferedPacketWriter(self.socket, self.buffer)
            self._handshake(user, password, db, charset)
            #handshake complete client can now send commands
            self.state = self.STATE_CONNECTED

            if autocommit == False:
                self.set_autocommit(False)
            elif autocommit == True:
                self.set_autocommit(True)
            else:
                pass #whatever is the default of the db (ON in the case of mysql)

            if charset is not None:
                self.set_charset(charset)

            self.set_use_unicode(use_unicode)

            return self
        except gevent.Timeout:
            # timeouts leave the connection reusable (INIT), unlike hard errors
            self.state = self.STATE_INIT
            raise
        except ClientLoginError:
            self.state = self.STATE_INIT
            raise
        except:
            self.state = self.STATE_ERROR
            raise

    def close(self):
        """close this connection"""
        assert self.is_connected(), "make sure connection is connected before closing"
        if self._incommand != False: assert False, "cannot close while still in a command"
        self._close()

    def command(self, cmd, cmd_text):
        """sends a COM_XXX command with the given text and possibly return a resultset (select)"""
        #print 'command', cmd, repr(cmd_text), type(cmd_text)
        assert type(cmd_text) == str #as opposed to unicode
        assert self.is_connected(), "make sure connection is connected before query"
        if self._incommand != False: assert False, "overlapped commands not supported"
        if self.current_resultset: assert False, "overlapped commands not supported, pls read prev resultset and close it"
        try:
            self._incommand = True
            if self._time_command:
                start_time = time.time()
            self._send_command(cmd, cmd_text)
            #read result, expect 1 of OK, ERROR or result set header
            self.buffer.flip()
            packet = self.reader.read_packet()
            result = packet.read_byte()
            #print 'res', result

            if self._time_command:
                end_time = time.time()
                self._command_time = end_time - start_time

            if result == 0x00:
                #OK, return (affected rows, last row id)
                rowcount = self.reader.read_length_coded_binary()
                lastrowid = self.reader.read_length_coded_binary()
                return (rowcount, lastrowid)
            elif result == 0xff:
                raise ClientCommandError.from_error_packet(packet)
            else: #result set
                # first byte is the field count; hand off row reading to ResultSet
                self.current_resultset = ResultSet(self, result)
                return self.current_resultset
        except socket.error, e:
            # on fatal transport errors, close the connection before re-raising
            (errorcode, errorstring) = e
            if errorcode in [errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET, errno.EPIPE]:
                self._incommand = False
                self.close()
            if sys.platform == "win32":
                if errorcode in [errno.WSAECONNABORTED]:
                    self._incommand = False
                    self.close()
            raise
        finally:
            self._incommand = False

    def is_connected(self):
        # True only in the CONNECTED state (not while connecting/closing)
        return self.state == self.STATE_CONNECTED

    def query(self, cmd_text):
        """Sends a COM_QUERY command with the given text and return a resultset (select)"""
        return self.command(COMMAND.QUERY, cmd_text)

    def init_db(self, cmd_text):
        """Sends a COM_INIT command with the given text"""
        return self.command(COMMAND.INITDB, cmd_text)

    def set_autocommit(self, commit):
        """Sets autocommit setting for this connection. True = on, False = off"""
        self.command(COMMAND.QUERY, "SET AUTOCOMMIT = %s" % ('1' if commit else '0'))

    def commit(self):
        """Commits this connection"""
        self.command(COMMAND.QUERY, "COMMIT")

    def rollback(self):
        """Issues a rollback on this connection"""
        self.command(COMMAND.QUERY, "ROLLBACK")

    def set_charset(self, charset):
        """Sets the charset for this connections (used to decode string fields into unicode strings)"""
        self.reader.reader.encoding = charset

    def set_use_unicode(self, use_unicode):
        # toggles unicode decoding of string fields on the row reader
        self.reader.reader.use_unicode = use_unicode

    def set_time_command(self, time_command):
        # enable/disable per-command wall-clock timing (see get_command_time)
        self._time_command = time_command

    def get_command_time(self):
        # duration of the last timed command in seconds (-1 if never timed)
        return self._command_time
# class-level logger shared by all Connection instances
Connection.log = logging.getLogger(Connection.__name__)

def connect(*args, **kwargs):
    """Module-level convenience: create a Connection and connect it."""
    return Connection().connect(*args, **kwargs)
|
2ndQuadrant/ansible | refs/heads/master | lib/ansible/modules/network/vyos/vyos_l3_interface.py | 56 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# module maturity/ownership markers consumed by Ansible tooling
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_l3_interface
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L3 interfaces on VyOS network devices
description:
- This module provides declarative management of L3 interfaces
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
aggregate:
description: List of L3 interfaces definitions
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Set eth0 IPv4 address
vyos_l3_interface:
name: eth0
ipv4: 192.168.0.1/24
- name: Remove eth0 IPv4 address
vyos_l3_interface:
name: eth0
state: absent
- name: Set IP addresses on aggregate
vyos_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
vyos_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces ethernet eth0 address '192.168.0.1/24'
"""
import socket
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import is_masklen, validate_ip_address
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import load_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def is_ipv4(value):
    """Return True if *value* is an IPv4 address in CIDR form (a.b.c.d/nn).

    Bug fix: the original indexed ``address[1]`` unconditionally, raising
    IndexError for any value without a '/' prefix length; such values now
    simply return False.
    """
    if not value:
        return False
    address = value.split('/')
    if len(address) != 2:
        # no (or more than one) '/nn' prefix -> not a valid CIDR address
        return False
    if is_masklen(address[1]) and validate_ip_address(address[0]):
        return True
    return False
def is_ipv6(value):
    """Return True if *value* is an IPv6 address in CIDR form (addr/prefix).

    Bug fix: the original crashed with IndexError when the value had no
    '/prefix' part and with ValueError when the prefix was not numeric;
    both malformed shapes now return False.
    """
    if not value:
        return False
    address = value.split('/')
    if len(address) != 2:
        return False
    try:
        prefix = int(address[1])
    except ValueError:
        return False
    if not 0 <= prefix <= 128:
        return False
    try:
        # inet_pton validates the textual IPv6 address itself
        socket.inet_pton(socket.AF_INET6, address[0])
    except socket.error:
        return False
    return True
def search_obj_in_list(name, lst):
    """Return the first dict in *lst* whose 'name' equals *name*, else None."""
    return next((entry for entry in lst if entry['name'] == name), None)
def _address_base(name):
    """Return the 'interfaces ... address' config path for *name*
    (the loopback uses a different path than ethernet ports)."""
    if name == "lo":
        return 'interfaces loopback lo address'
    return 'interfaces ethernet ' + name + ' address'


def map_obj_to_commands(updates, module):
    """Diff desired state (*want*) against device state (*have*) and return
    the list of VyOS set/delete commands needed to converge.

    *updates* is a ``(want, have)`` tuple of lists of interface dicts with
    'name', 'ipv4', 'ipv6' (and 'state' on the want side); *module* is unused
    but kept for signature compatibility with the other mapper functions.

    Refactor: the original duplicated the loopback/ethernet string building
    across eight nearly identical branches; the generated command strings
    are byte-for-byte identical.
    """
    commands = list()
    want, have = updates

    for w in want:
        name = w['name']
        ipv4 = w['ipv4']
        ipv6 = w['ipv6']
        state = w['state']

        obj_in_have = search_obj_in_list(name, have)
        if not obj_in_have:
            # interface not present on the device: nothing to add or remove
            continue
        base = _address_base(name)

        if state == 'absent':
            if not ipv4 and not ipv6 and (obj_in_have['ipv4'] or obj_in_have['ipv6']):
                # no specific address given: wipe all addresses at once
                commands.append('delete ' + base)
            else:
                # only delete addresses actually configured on the device
                if ipv4 and ipv4 in obj_in_have['ipv4']:
                    commands.append('delete ' + base + ' ' + ipv4)
                if ipv6 and ipv6 in obj_in_have['ipv6']:
                    commands.append('delete ' + base + ' ' + ipv6)
        elif state == 'present':
            # only set addresses that are not already configured
            if ipv4 and ipv4 not in obj_in_have['ipv4']:
                commands.append('set ' + base + ' ' + ipv4)
            if ipv6 and ipv6 not in obj_in_have['ipv6']:
                commands.append('set ' + base + ' ' + ipv6)

    return commands
def map_config_to_obj(module):
    """Parse `show interfaces` output into a list of
    {'name': ..., 'ipv4': [...], 'ipv6': [...]} dicts describing the device."""
    obj = []
    output = run_commands(module, ['show interfaces'])
    # split on a newline followed by 'e' (ethN) or 'l' (lo); the leading
    # letter is consumed by the split and re-attached below
    lines = re.split(r'\n[e|l]', output[0])[1:]

    if len(lines) > 0:
        for line in lines:
            splitted_line = line.split()

            if len(splitted_line) > 0:
                ipv4 = []
                ipv6 = []

                # re-attach the letter eaten by the split above:
                # 'thN...' -> 'ethN...', 'o...' -> 'lo...'
                if splitted_line[0].lower().startswith('th'):
                    name = 'e' + splitted_line[0].lower()
                elif splitted_line[0].lower().startswith('o'):
                    name = 'l' + splitted_line[0].lower()

                for i in splitted_line[1:]:
                    # anything that looks like addr/prefix is an IP address
                    if (('.' in i or ':' in i) and '/' in i):
                        # NOTE(review): r'\n' splits on a literal backslash-n
                        # sequence, not on a newline -- probably intended
                        # '\n'; verify against real command output
                        value = i.split(r'\n')[0]
                        if is_ipv4(value):
                            ipv4.append(value)
                        elif is_ipv6(value):
                            ipv6.append(value)

                obj.append({'name': name,
                            'ipv4': ipv4,
                            'ipv6': ipv6})

    return obj
def map_params_to_obj(module):
    """Build the desired-state list from the module parameters.

    With an 'aggregate' parameter, each aggregate entry has its None values
    filled from the top-level parameters (in place, as the original did) and
    a copy is collected; otherwise a single entry is built from the
    top-level name/ipv4/ipv6/state parameters.
    """
    params = module.params
    aggregate = params.get('aggregate')
    if not aggregate:
        return [{
            'name': params['name'],
            'ipv4': params['ipv4'],
            'ipv6': params['ipv6'],
            'state': params['state'],
        }]

    collected = []
    for item in aggregate:
        # fill unset per-item values from the shared top-level parameters
        for key in item:
            if item.get(key) is None:
                item[key] = params[key]
        collected.append(item.copy())
    return collected
def main():
    """ main entry point for module execution
    """
    # per-interface options; also reused (minus defaults) for aggregate items
    element_spec = dict(
        name=dict(),
        ipv4=dict(),
        ipv6=dict(),
        state=dict(default='present',
                   choices=['present', 'absent'])
    )

    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )

    argument_spec.update(element_spec)
    argument_spec.update(vyos_argument_spec)

    # 'name' and 'aggregate' are alternatives: exactly one must be supplied
    required_one_of = [['name', 'aggregate']]
    mutually_exclusive = [['name', 'aggregate']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    warnings = list()

    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings

    # diff desired state (params) against device state (show interfaces)
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # in check mode, build but do not commit the configuration
        commit = not module.check_mode
        load_config(module, commands, commit=commit)
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
class ProgrammingError(Exception):
    """Somebody made a mistake in the code."""


# Apply the project's monkeypatches as a package-import side effect.
# MONKEYPATCH! WOO HOO! LULZ
from kitsune.sumo.monkeypatch import patch # noqa
patch()
|
Vegasvikk/django-cms | refs/heads/develop | cms/south_migrations/0071_mptt_to_mp.py | 51 | # -*- coding: utf-8 -*-
from django.db.models import F
from django.middleware import transaction
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from treebeard.numconv import NumConv
# Materialized-path encoding parameters (must match treebeard's defaults:
# 4-character steps over a base-36 alphabet).
STEPLEN = 4
ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'


class MP_AddHandler(object):
    """Base class for the add-root/add-child handlers below."""
    def __init__(self):
        self.stmts = []


# base-36 converter used to encode step numbers into fixed-width path keys
NUM = NumConv(len(ALPHABET), ALPHABET)


def _int2str(num):
    # encode an integer step in the path ALPHABET
    return NUM.int2str(num)


def _str2int(num):
    # decode an ALPHABET-encoded step back to an integer
    return NUM.str2int(num)
def _get_basepath(path, depth):
    """:returns: The base path of another path up to a given depth"""
    if not path:
        return ''
    # each tree level contributes STEPLEN characters to the path
    return path[0:depth * STEPLEN]
def _get_path(path, depth, newstep):
    """
    Builds a path given some values

    :param path: the base path
    :param depth: the depth of the node
    :param newstep: the value (integer) of the new step
    """
    parentpath = _get_basepath(path, depth - 1)
    key = _int2str(newstep)
    # left-pad the encoded step to the fixed STEPLEN width
    padding = ALPHABET[0] * (STEPLEN - len(key))
    return parentpath + padding + key
def _inc_path(obj):
    """:returns: The path of the next sibling of a given node path."""
    # bump the last fixed-width step of the path by one
    newpos = _str2int(obj.path[-STEPLEN:]) + 1
    key = _int2str(newpos)
    if len(key) > STEPLEN:
        # Bug fix: the original wrapped this message in _(), but no gettext
        # alias is imported in this module, so hitting the overflow raised a
        # NameError instead of the intended exception.
        raise Exception("Path Overflow from: '%s'" % (obj.path, ))
    return '{0}{1}{2}'.format(
        obj.path[:-STEPLEN],
        ALPHABET[0] * (STEPLEN - len(key)),
        key
    )
class MP_AddRootHandler(MP_AddHandler):
    """Assigns a materialized path to a node that has no parent.

    kwargs: 'instance' (the node being converted) and 'last_root'
    (the previously converted root, or None for the first one).
    """
    def __init__(self, **kwargs):
        super(MP_AddRootHandler, self).__init__()
        self.kwargs = kwargs

    def process(self):
        # do we have a root node already?
        last_root = self.kwargs['last_root']

        if last_root:
            # adding the new root node as the last one
            newpath = _inc_path(last_root)
        else:
            # adding the first root node
            newpath = _get_path(None, 1, 1)

        newobj = self.kwargs['instance']
        newobj.depth = 1
        newobj.path = newpath
        # saving the instance before returning it
        newobj.save()
        return newobj
class MP_AddChildHandler(MP_AddHandler):
    """Assigns a materialized path to a node below an already-converted
    parent, appending it as the parent's last child.

    Bug fix: the depth-overflow message was wrapped in _(), but no gettext
    alias is imported in this module, so hitting the limit raised a
    NameError instead of the intended exception.
    """
    def __init__(self, node, model, **kwargs):
        super(MP_AddChildHandler, self).__init__()
        self.node = node
        self.node_cls = node.__class__
        self.kwargs = kwargs
        self.model = model

    def process(self):
        newobj = self.kwargs['instance']
        newobj.depth = self.node.depth + 1
        if self.node.numchild == 0:
            # the node had no children, adding the first child
            newobj.path = _get_path(
                self.node.path, newobj.depth, 1)
            max_length = self.node_cls._meta.get_field('path').max_length
            if len(newobj.path) > max_length:
                raise Exception(
                    'The new node is too deep in the tree, try'
                    ' increasing the path.max_length property'
                    ' and UPDATE your database')
        else:
            # adding the new child as the last one
            newobj.path = _inc_path(self.node.last_child)
        # saving the instance before returning it
        newobj.save()
        newobj._cached_parent_obj = self.node
        self.model.objects.filter(
            path=self.node.path).update(numchild=F('numchild')+1)

        # we increase the numchild value of the object in memory
        self.node.numchild += 1
        return newobj
class Migration(DataMigration):
    def forwards(self, orm):
        """Rebuild the MPTT trees (tree_id/level/lft) of pages and plugins as
        treebeard materialized-path trees (path/depth/numchild)."""
        # iterate in MPTT order so parents are always processed before their
        # children and siblings keep their relative order
        pages = orm['cms.Page'].objects.all().order_by('tree_id', 'level', 'lft')
        cache = {}
        last_root = None
        for page in pages:
            if not page.parent_id:
                handler = MP_AddRootHandler(instance=page, last_root=last_root)
                handler.process()
                last_root = page
                page.last_child = None
            else:
                # parent is guaranteed cached by the ordering above
                parent = cache[page.parent_id]
                handler = MP_AddChildHandler(parent, orm['cms.Page'], instance=page)
                handler.process()
                parent.last_child = page
            cache[page.pk] = page
        # same conversion for the plugin tree
        plugins = orm['cms.CMSPlugin'].objects.all().order_by('tree_id', 'level', 'lft')
        cache = {}
        last_root = None
        for plugin in plugins:
            if not plugin.parent_id:
                handler = MP_AddRootHandler(instance=plugin, last_root=last_root)
                handler.process()
                last_root = plugin
                plugin.last_child = None
            else:
                parent = cache[plugin.parent_id]
                handler = MP_AddChildHandler(parent, orm['cms.CMSPlugin'], instance=plugin)
                handler.process()
                parent.last_child = plugin
            cache[plugin.pk] = plugin
def backwards(self, orm):
print("***********************************")
print("***********************************")
print("***********************************")
print(" ATTENTION")
print(" =========")
print("")
print("Your tree is now in an nonfunctional")
print("state. Please install an old version")
print("of django CMS (3.0.5) and run:")
print(" python manage.py cms fix-mptt")
print()
print()
print("************************************")
print("************************************")
print("************************************")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.aliaspluginmodel': {
'Meta': {'object_name': 'AliasPluginModel', '_ormbases': ['cms.CMSPlugin']},
'alias_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_placeholder'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_reference'", 'null': 'True', 'to': "orm['cms.CMSPlugin']"})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
symmetrical = True
|
AndreManzano/django-rblreport | refs/heads/master | coredata/views.py | 1 | # from django.shortcuts import render
from rest_framework import viewsets
from .models import Ip, Rbl
from .serializers import IpSerializer, RblSerializer
class IpViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint exposing Ip records, ordered by primary key."""
    serializer_class = IpSerializer
    queryset = Ip.objects.order_by('id')
class RblViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint exposing Rbl records, ordered by primary key."""
    serializer_class = RblSerializer
    queryset = Rbl.objects.order_by('id')
|
tvibliani/odoo | refs/heads/8.0 | addons/point_of_sale/point_of_sale.py | 28 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import psycopg2
import time
from datetime import datetime
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools import float_is_zero
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
import openerp.addons.product.product
_logger = logging.getLogger(__name__)
class pos_config(osv.osv):
    """Point of Sale configuration.

    Holds the hardware/interface options, payment/sale journals, barcode
    patterns and the stock location used by a point of sale.
    """
    _name = 'pos.config'

    # Lifecycle states of a POS configuration.
    POS_CONFIG_STATE = [
        ('active', 'Active'),
        ('inactive', 'Inactive'),
        ('deprecated', 'Deprecated')
    ]

    def _get_currency(self, cr, uid, ids, fieldnames, args, context=None):
        """Function field getter: currency of the sale journal (or its
        company's currency), falling back to the current user's company
        currency when no journal is configured."""
        result = dict.fromkeys(ids, False)
        for pos_config in self.browse(cr, uid, ids, context=context):
            if pos_config.journal_id:
                currency_id = pos_config.journal_id.currency.id or pos_config.journal_id.company_id.currency_id.id
            else:
                currency_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.currency_id.id
            result[pos_config.id] = currency_id
        return result

    _columns = {
        'name' : fields.char('Point of Sale Name', select=1,
             required=True, help="An internal identification of the point of sale"),
        'journal_ids' : fields.many2many('account.journal', 'pos_config_journal_rel',
             'pos_config_id', 'journal_id', 'Available Payment Methods',
             domain="[('journal_user', '=', True ), ('type', 'in', ['bank', 'cash'])]",),
        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
        'stock_location_id': fields.many2one('stock.location', 'Stock Location', domain=[('usage', '=', 'internal')], required=True),
        'journal_id' : fields.many2one('account.journal', 'Sale Journal',
             domain=[('type', '=', 'sale')],
             help="Accounting journal used to post sales entries."),
        'currency_id' : fields.function(_get_currency, type="many2one", string="Currency", relation="res.currency"),
        'iface_self_checkout' : fields.boolean('Self Checkout Mode', # FIXME : this field is obsolete
             help="Check this if this point of sale should open by default in a self checkout mode. If unchecked, Odoo uses the normal cashier mode by default."),
        'iface_cashdrawer' : fields.boolean('Cashdrawer', help="Automatically open the cashdrawer"),
        'iface_payment_terminal' : fields.boolean('Payment Terminal', help="Enables Payment Terminal integration"),
        'iface_electronic_scale' : fields.boolean('Electronic Scale', help="Enables Electronic Scale integration"),
        'iface_vkeyboard' : fields.boolean('Virtual KeyBoard', help="Enables an integrated Virtual Keyboard"),
        'iface_print_via_proxy' : fields.boolean('Print via Proxy', help="Bypass browser printing and prints via the hardware proxy"),
        'iface_scan_via_proxy' : fields.boolean('Scan via Proxy', help="Enable barcode scanning with a remotely connected barcode scanner"),
        'iface_invoicing': fields.boolean('Invoicing',help='Enables invoice generation from the Point of Sale'),
        'iface_big_scrollbars': fields.boolean('Large Scrollbars',help='For imprecise industrial touchscreens'),
        'receipt_header': fields.text('Receipt Header',help="A short text that will be inserted as a header in the printed receipt"),
        'receipt_footer': fields.text('Receipt Footer',help="A short text that will be inserted as a footer in the printed receipt"),
        'proxy_ip': fields.char('IP Address', help='The hostname or ip address of the hardware proxy, Will be autodetected if left empty', size=45),
        'state' : fields.selection(POS_CONFIG_STATE, 'Status', required=True, readonly=True, copy=False),
        'sequence_id' : fields.many2one('ir.sequence', 'Order IDs Sequence', readonly=True,
            help="This sequence is automatically created by Odoo but you can change it "\
                "to customize the reference numbers of your orders.", copy=False),
        'session_ids': fields.one2many('pos.session', 'config_id', 'Sessions'),
        'group_by' : fields.boolean('Group Journal Items', help="Check this if you want to group the Journal Items by Product while closing a Session"),
        'pricelist_id': fields.many2one('product.pricelist','Pricelist', required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'barcode_product': fields.char('Product Barcodes', size=64, help='The pattern that identifies product barcodes'),
        'barcode_cashier': fields.char('Cashier Barcodes', size=64, help='The pattern that identifies cashier login barcodes'),
        'barcode_customer': fields.char('Customer Barcodes',size=64, help='The pattern that identifies customer\'s client card barcodes'),
        'barcode_price': fields.char('Price Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded price'),
        'barcode_weight': fields.char('Weight Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded weight'),
        'barcode_discount': fields.char('Discount Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded discount'),
    }

    def _check_cash_control(self, cr, uid, ids, context=None):
        """Constraint: at most one cash-controlled journal per POS."""
        return all(
            (sum(int(journal.cash_control) for journal in record.journal_ids) <= 1)
            for record in self.browse(cr, uid, ids, context=context)
        )

    def _check_company_location(self, cr, uid, ids, context=None):
        """Constraint: the stock location must belong to the POS company
        (locations with no company are accepted)."""
        for config in self.browse(cr, uid, ids, context=context):
            if config.stock_location_id.company_id and config.stock_location_id.company_id.id != config.company_id.id:
                return False
        return True

    def _check_company_journal(self, cr, uid, ids, context=None):
        """Constraint: the sale journal must belong to the POS company."""
        for config in self.browse(cr, uid, ids, context=context):
            if config.journal_id and config.journal_id.company_id.id != config.company_id.id:
                return False
        return True

    def _check_company_payment(self, cr, uid, ids, context=None):
        """Constraint: every payment journal must belong to the POS company."""
        for config in self.browse(cr, uid, ids, context=context):
            journal_ids = [j.id for j in config.journal_ids]
            # Count journals of another company among the selected ones; any
            # match invalidates the record.
            if self.pool['account.journal'].search(cr, uid, [
                    ('id', 'in', journal_ids),
                    ('company_id', '!=', config.company_id.id)
                ], count=True, context=context):
                return False
        return True

    _constraints = [
        (_check_cash_control, "You cannot have two cash controls in one Point Of Sale !", ['journal_ids']),
        (_check_company_location, "The company of the stock location is different than the one of point of sale", ['company_id', 'stock_location_id']),
        (_check_company_journal, "The company of the sale journal is different than the one of point of sale", ['company_id', 'journal_id']),
        (_check_company_payment, "The company of a payment method is different than the one of point of sale", ['company_id', 'journal_ids']),
    ]

    def name_get(self, cr, uid, ids, context=None):
        """Display name: POS name plus either '(not used)' or the name of
        the user responsible for the latest session."""
        result = []
        # NOTE: ``states`` is only referenced by the commented-out display
        # format below; it is kept for that reason.
        states = {
            'opening_control': _('Opening Control'),
            'opened': _('In Progress'),
            'closing_control': _('Closing Control'),
            'closed': _('Closed & Posted'),
        }
        for record in self.browse(cr, uid, ids, context=context):
            if (not record.session_ids) or (record.session_ids[0].state=='closed'):
                result.append((record.id, record.name+' ('+_('not used')+')'))
                continue
            # session_ids is ordered most-recent-first (pos.session _order).
            session = record.session_ids[0]
            result.append((record.id, record.name + ' ('+session.user_id.name+')')) #, '+states[session.state]+')'))
        return result

    def _default_sale_journal(self, cr, uid, context=None):
        """Default: first sale journal of the current user's company."""
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        res = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'sale'), ('company_id', '=', company_id)], limit=1, context=context)
        return res and res[0] or False

    def _default_pricelist(self, cr, uid, context=None):
        """Default: first sale pricelist found."""
        res = self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')], limit=1, context=context)
        return res and res[0] or False

    def _get_default_location(self, cr, uid, context=None):
        """Default: stock location of the first warehouse of the current
        user's company."""
        wh_obj = self.pool.get('stock.warehouse')
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        res = wh_obj.search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
        if res and res[0]:
            return wh_obj.browse(cr, uid, res[0], context=context).lot_stock_id.id
        return False

    def _get_default_company(self, cr, uid, context=None):
        """Default: the current user's company."""
        company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
        return company_id

    _defaults = {
        'state' : POS_CONFIG_STATE[0][0],
        'journal_id': _default_sale_journal,
        'group_by' : True,
        'pricelist_id': _default_pricelist,
        'iface_invoicing': True,
        'stock_location_id': _get_default_location,
        'company_id': _get_default_company,
        'barcode_product': '*',
        'barcode_cashier': '041*',
        'barcode_customer':'042*',
        'barcode_weight': '21xxxxxNNDDD',
        'barcode_discount':'22xxxxxxxxNN',
        'barcode_price': '23xxxxxNNNDD',
    }

    def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
        """When the picking type changes, propose its source location as the
        POS stock location (only for internal -> customer picking types)."""
        p_type_obj = self.pool.get("stock.picking.type")
        p_type = p_type_obj.browse(cr, uid, picking_type_id, context=context)
        if p_type.default_location_src_id and p_type.default_location_src_id.usage == 'internal' and p_type.default_location_dest_id and p_type.default_location_dest_id.usage == 'customer':
            return {'value': {'stock_location_id': p_type.default_location_src_id.id}}
        return False

    def set_active(self, cr, uid, ids, context=None):
        """Workflow helper: move the configuration(s) to 'active'."""
        return self.write(cr, uid, ids, {'state' : 'active'}, context=context)

    def set_inactive(self, cr, uid, ids, context=None):
        """Workflow helper: move the configuration(s) to 'inactive'."""
        return self.write(cr, uid, ids, {'state' : 'inactive'}, context=context)

    def set_deprecate(self, cr, uid, ids, context=None):
        """Workflow helper: move the configuration(s) to 'deprecated'."""
        return self.write(cr, uid, ids, {'state' : 'deprecated'}, context=context)

    def create(self, cr, uid, values, context=None):
        """Create a POS config, generating its dedicated order (and order
        line) ir.sequence records."""
        ir_sequence = self.pool.get('ir.sequence')
        # force sequence_id field to new pos.order sequence
        values['sequence_id'] = ir_sequence.create(cr, uid, {
            'name': 'POS Order %s' % values['name'],
            'padding': 4,
            'prefix': "%s/" % values['name'],
            'code': "pos.order",
            'company_id': values.get('company_id', False),
        }, context=context)

        # TODO master: add field sequence_line_id on model
        # this make sure we always have one available per company
        ir_sequence.create(cr, uid, {
            'name': 'POS order line %s' % values['name'],
            'padding': 4,
            'prefix': "%s/" % values['name'],
            'code': "pos.order.line",
            'company_id': values.get('company_id', False),
        }, context=context)
        return super(pos_config, self).create(cr, uid, values, context=context)

    def unlink(self, cr, uid, ids, context=None):
        """Delete POS configs together with their order sequences."""
        for obj in self.browse(cr, uid, ids, context=context):
            if obj.sequence_id:
                obj.sequence_id.unlink()
        return super(pos_config, self).unlink(cr, uid, ids, context=context)
class pos_session(osv.osv):
_name = 'pos.session'
_order = 'id desc'
POS_SESSION_STATE = [
('opening_control', 'Opening Control'), # Signal open
('opened', 'In Progress'), # Signal closing
('closing_control', 'Closing Control'), # Signal close
('closed', 'Closed & Posted'),
]
def _compute_cash_all(self, cr, uid, ids, fieldnames, args, context=None):
result = dict()
for record in self.browse(cr, uid, ids, context=context):
result[record.id] = {
'cash_journal_id' : False,
'cash_register_id' : False,
'cash_control' : False,
}
for st in record.statement_ids:
if st.journal_id.cash_control == True:
result[record.id]['cash_control'] = True
result[record.id]['cash_journal_id'] = st.journal_id.id
result[record.id]['cash_register_id'] = st.id
return result
_columns = {
'config_id' : fields.many2one('pos.config', 'Point of Sale',
help="The physical point of sale you will use.",
required=True,
select=1,
domain="[('state', '=', 'active')]",
),
'name' : fields.char('Session ID', required=True, readonly=True),
'user_id' : fields.many2one('res.users', 'Responsible',
required=True,
select=1,
readonly=True,
states={'opening_control' : [('readonly', False)]}
),
'currency_id' : fields.related('config_id', 'currency_id', type="many2one", relation='res.currency', string="Currnecy"),
'start_at' : fields.datetime('Opening Date', readonly=True),
'stop_at' : fields.datetime('Closing Date', readonly=True),
'state' : fields.selection(POS_SESSION_STATE, 'Status',
required=True, readonly=True,
select=1, copy=False),
'sequence_number': fields.integer('Order Sequence Number', help='A sequence number that is incremented with each order'),
'login_number': fields.integer('Login Sequence Number', help='A sequence number that is incremented each time a user resumes the pos session'),
'cash_control' : fields.function(_compute_cash_all,
multi='cash',
type='boolean', string='Has Cash Control'),
'cash_journal_id' : fields.function(_compute_cash_all,
multi='cash',
type='many2one', relation='account.journal',
string='Cash Journal', store=True),
'cash_register_id' : fields.function(_compute_cash_all,
multi='cash',
type='many2one', relation='account.bank.statement',
string='Cash Register', store=True),
'opening_details_ids' : fields.related('cash_register_id', 'opening_details_ids',
type='one2many', relation='account.cashbox.line',
string='Opening Cash Control'),
'details_ids' : fields.related('cash_register_id', 'details_ids',
type='one2many', relation='account.cashbox.line',
string='Cash Control'),
'cash_register_balance_end_real' : fields.related('cash_register_id', 'balance_end_real',
type='float',
digits_compute=dp.get_precision('Account'),
string="Ending Balance",
help="Total of closing cash control lines.",
readonly=True),
'cash_register_balance_start' : fields.related('cash_register_id', 'balance_start',
type='float',
digits_compute=dp.get_precision('Account'),
string="Starting Balance",
help="Total of opening cash control lines.",
readonly=True),
'cash_register_total_entry_encoding' : fields.related('cash_register_id', 'total_entry_encoding',
string='Total Cash Transaction',
readonly=True,
help="Total of all paid sale orders"),
'cash_register_balance_end' : fields.related('cash_register_id', 'balance_end',
type='float',
digits_compute=dp.get_precision('Account'),
string="Theoretical Closing Balance",
help="Sum of opening balance and transactions.",
readonly=True),
'cash_register_difference' : fields.related('cash_register_id', 'difference',
type='float',
string='Difference',
help="Difference between the theoretical closing balance and the real closing balance.",
readonly=True),
'journal_ids' : fields.related('config_id', 'journal_ids',
type='many2many',
readonly=True,
relation='account.journal',
string='Available Payment Methods'),
'order_ids' : fields.one2many('pos.order', 'session_id', 'Orders'),
'statement_ids' : fields.one2many('account.bank.statement', 'pos_session_id', 'Bank Statement', readonly=True),
}
_defaults = {
'name' : '/',
'user_id' : lambda obj, cr, uid, context: uid,
'state' : 'opening_control',
'sequence_number': 1,
'login_number': 0,
}
_sql_constraints = [
('uniq_name', 'unique(name)', "The name of this POS Session must be unique !"),
]
def _check_unicity(self, cr, uid, ids, context=None):
for session in self.browse(cr, uid, ids, context=None):
# open if there is no session in 'opening_control', 'opened', 'closing_control' for one user
domain = [
('state', 'not in', ('closed','closing_control')),
('user_id', '=', session.user_id.id)
]
count = self.search_count(cr, uid, domain, context=context)
if count>1:
return False
return True
def _check_pos_config(self, cr, uid, ids, context=None):
for session in self.browse(cr, uid, ids, context=None):
domain = [
('state', '!=', 'closed'),
('config_id', '=', session.config_id.id)
]
count = self.search_count(cr, uid, domain, context=context)
if count>1:
return False
return True
_constraints = [
(_check_unicity, "You cannot create two active sessions with the same responsible!", ['user_id', 'state']),
(_check_pos_config, "You cannot create two active sessions related to the same point of sale!", ['config_id']),
]
    def create(self, cr, uid, values, context=None):
        """Create a POS session for a pos.config.

        Requires a config_id (from `values` or the context default). Performs
        lazy minimal configuration of the pos.config (sale journal, cash
        journals) if missing, then creates one bank statement per payment
        journal and links them to the new session.

        :raises osv.except_osv: if no pos.config is given or no sale journal
            can be determined.
        """
        context = dict(context or {})
        config_id = values.get('config_id', False) or context.get('default_config_id', False)
        if not config_id:
            raise osv.except_osv( _('Error!'),
                _("You should assign a Point of Sale to your session."))

        # journal_id is not required on the pos_config because it does not
        # exists at the installation. If nothing is configured at the
        # installation we do the minimal configuration. Impossible to do in
        # the .xml files as the CoA is not yet installed.
        jobj = self.pool.get('pos.config')
        pos_config = jobj.browse(cr, uid, config_id, context=context)
        context.update({'company_id': pos_config.company_id.id})
        if not pos_config.journal_id:
            jid = jobj.default_get(cr, uid, ['journal_id'], context=context)['journal_id']
            if jid:
                # Written as SUPERUSER: the session user may lack write access
                # on pos.config.
                jobj.write(cr, openerp.SUPERUSER_ID, [pos_config.id], {'journal_id': jid}, context=context)
            else:
                raise osv.except_osv( _('error!'),
                    _("Unable to open the session. You have to assign a sale journal to your point of sale."))

        # define some cash journal if no payment method exists
        if not pos_config.journal_ids:
            journal_proxy = self.pool.get('account.journal')
            # Prefer journals already flagged for POS use, then any cash
            # journal, then any POS-flagged journal of another type.
            cashids = journal_proxy.search(cr, uid, [('journal_user', '=', True), ('type','=','cash')], context=context)
            if not cashids:
                cashids = journal_proxy.search(cr, uid, [('type', '=', 'cash')], context=context)
                if not cashids:
                    cashids = journal_proxy.search(cr, uid, [('journal_user','=',True)], context=context)

            journal_proxy.write(cr, openerp.SUPERUSER_ID, cashids, {'journal_user': True})
            jobj.write(cr, openerp.SUPERUSER_ID, [pos_config.id], {'journal_ids': [(6,0, cashids)]})

        # Re-browse: journal_ids may have just been written above.
        pos_config = jobj.browse(cr, uid, config_id, context=context)
        bank_statement_ids = []
        for journal in pos_config.journal_ids:
            bank_values = {
                'journal_id' : journal.id,
                'user_id' : uid,
                'company_id' : pos_config.company_id.id
            }
            statement_id = self.pool.get('account.bank.statement').create(cr, uid, bank_values, context=context)
            bank_statement_ids.append(statement_id)

        values.update({
            'name': self.pool['ir.sequence'].get(cr, uid, 'pos.session', context=context),
            'statement_ids' : [(6, 0, bank_statement_ids)],
            'config_id': config_id
        })

        return super(pos_session, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
for statement in obj.statement_ids:
statement.unlink(context=context)
return super(pos_session, self).unlink(cr, uid, ids, context=context)
def open_cb(self, cr, uid, ids, context=None):
"""
call the Point Of Sale interface and set the pos.session to 'opened' (in progress)
"""
context = dict(context or {})
if isinstance(ids, (int, long)):
ids = [ids]
this_record = self.browse(cr, uid, ids[0], context=context)
this_record.signal_workflow('open')
context.update(active_id=this_record.id)
return {
'type' : 'ir.actions.act_url',
'url' : '/pos/web/',
'target': 'self',
}
def login(self, cr, uid, ids, context=None):
this_record = self.browse(cr, uid, ids[0], context=context)
this_record.write({
'login_number': this_record.login_number+1,
})
def wkf_action_open(self, cr, uid, ids, context=None):
# second browse because we need to refetch the data from the DB for cash_register_id
for record in self.browse(cr, uid, ids, context=context):
values = {}
if not record.start_at:
values['start_at'] = time.strftime('%Y-%m-%d %H:%M:%S')
values['state'] = 'opened'
record.write(values)
for st in record.statement_ids:
st.button_open()
return self.open_frontend_cb(cr, uid, ids, context=context)
def wkf_action_opening_control(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state' : 'opening_control'}, context=context)
def wkf_action_closing_control(self, cr, uid, ids, context=None):
for session in self.browse(cr, uid, ids, context=context):
for statement in session.statement_ids:
if (statement != session.cash_register_id) and (statement.balance_end != statement.balance_end_real):
self.pool.get('account.bank.statement').write(cr, uid, [statement.id], {'balance_end_real': statement.balance_end})
return self.write(cr, uid, ids, {'state' : 'closing_control', 'stop_at' : time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
    def wkf_action_close(self, cr, uid, ids, context=None):
        """Workflow: close the session.

        Validates every bank statement (refusing cash differences above the
        journal's authorized threshold unless the user is a POS manager),
        posts the orders' accounting entries, sets the session to 'closed'
        and reloads the client on the POS root menu.
        """
        # Close CashBox
        for record in self.browse(cr, uid, ids, context=context):
            for st in record.statement_ids:
                if abs(st.difference) > st.journal_id.amount_authorized_diff:
                    # The pos manager can close statements with maximums.
                    if not self.pool.get('ir.model.access').check_groups(cr, uid, "point_of_sale.group_pos_manager"):
                        raise osv.except_osv( _('Error!'),
                            _("Your ending balance is too different from the theoretical cash closing (%.2f), the maximum allowed is: %.2f. You can contact your manager to force it.") % (st.difference, st.journal_id.amount_authorized_diff))
                if (st.journal_id.type not in ['bank', 'cash']):
                    raise osv.except_osv(_('Error!'),
                        _("The type of the journal for your payment method should be bank or cash "))
                # Dynamic dispatch: calls button_confirm_bank or
                # button_confirm_cash depending on the journal type.
                getattr(st, 'button_confirm_%s' % st.journal_id.type)(context=context)
        self._confirm_orders(cr, uid, ids, context=context)
        self.write(cr, uid, ids, {'state' : 'closed'}, context=context)
        obj = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'point_of_sale', 'menu_point_root')[1]
        return {
            'type' : 'ir.actions.client',
            'name' : 'Point of Sale Menu',
            'tag' : 'reload',
            'params' : {'menu_id': obj},
        }
    def _confirm_orders(self, cr, uid, ids, context=None):
        """Post the accounting entries of each session's paid orders.

        Creates one account.move per session (in the sale journal's company)
        and one set of move lines for all 'paid' orders, then pushes every
        order through the 'done' workflow signal.

        :raises osv.except_osv: if an order is in a state other than
            'paid', 'invoiced' or 'done'.
        """
        pos_order_obj = self.pool.get('pos.order')
        for session in self.browse(cr, uid, ids, context=context):
            company_id = session.config_id.journal_id.company_id.id
            # force_company so the move lines are posted in the journal's
            # company rather than the current user's.
            local_context = dict(context or {}, force_company=company_id)
            order_ids = [order.id for order in session.order_ids if order.state == 'paid']

            move_id = pos_order_obj._create_account_move(cr, uid, session.start_at, session.name, session.config_id.journal_id.id, company_id, context=context)

            pos_order_obj._create_account_move_line(cr, uid, order_ids, session, move_id, context=local_context)

            for order in session.order_ids:
                if order.state == 'done':
                    continue
                if order.state not in ('paid', 'invoiced'):
                    raise osv.except_osv(
                        _('Error!'),
                        _("You cannot confirm all orders of this session, because they have not the 'paid' status"))
                else:
                    pos_order_obj.signal_workflow(cr, uid, [order.id], 'done')

        return True
def open_frontend_cb(self, cr, uid, ids, context=None):
context = dict(context or {})
if not ids:
return {}
for session in self.browse(cr, uid, ids, context=context):
if session.user_id.id != uid:
raise osv.except_osv(
_('Error!'),
_("You cannot use the session of another users. This session is owned by %s. Please first close this one to use this point of sale." % session.user_id.name))
context.update({'active_id': ids[0]})
return {
'type' : 'ir.actions.act_url',
'target': 'self',
'url': '/pos/web/',
}
class pos_order(osv.osv):
    """A Point of Sale order: the sold lines, the payments received
    (bank statement lines) and the stock/accounting documents generated
    from them."""
    _name = "pos.order"
    _description = "Point of Sale"
    _order = "id desc"
def _amount_line_tax(self, cr, uid, line, context=None):
account_tax_obj = self.pool['account.tax']
taxes_ids = [tax for tax in line.product_id.taxes_id if tax.company_id.id == line.order_id.company_id.id]
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = account_tax_obj.compute_all(cr, uid, taxes_ids, price, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)['taxes']
val = 0.0
for c in taxes:
val += c.get('amount', 0.0)
return val
def _order_fields(self, cr, uid, ui_order, context=None):
return {
'name': ui_order['name'],
'user_id': ui_order['user_id'] or False,
'session_id': ui_order['pos_session_id'],
'lines': ui_order['lines'],
'pos_reference':ui_order['name'],
'partner_id': ui_order['partner_id'] or False,
}
def _payment_fields(self, cr, uid, ui_paymentline, context=None):
return {
'amount': ui_paymentline['amount'] or 0.0,
'payment_date': ui_paymentline['name'],
'statement_id': ui_paymentline['statement_id'],
'payment_name': ui_paymentline.get('note',False),
'journal': ui_paymentline['journal_id'],
}
    # This deals with orders that belong to a closed session. In order
    # to recover from this we:
    # - assign the order to another compatible open session
    # - if that doesn't exist, create a new one
    def _get_valid_session(self, cr, uid, order, context=None):
        """Return the id of an open session the order can be attached to.

        Looks for the most recently started open session sharing the closed
        session's config and user; if none exists, creates a fresh session
        for the same config and immediately opens it.
        """
        session = self.pool.get('pos.session')
        closed_session = session.browse(cr, uid, order['pos_session_id'], context=context)
        open_sessions = session.search(cr, uid, [('state', '=', 'opened'),
                                                 ('config_id', '=', closed_session.config_id.id),
                                                 ('user_id', '=', closed_session.user_id.id)],
                                       limit=1, order="start_at DESC", context=context)
        if open_sessions:
            return open_sessions[0]
        else:
            new_session_id = session.create(cr, uid, {
                'config_id': closed_session.config_id.id,
            }, context=context)
            new_session = session.browse(cr, uid, new_session_id, context=context)

            # bypass opening_control (necessary when using cash control)
            new_session.signal_workflow('open')

            return new_session_id
    def _process_order(self, cr, uid, order, context=None):
        """Create one pos.order (with its payments) from a frontend dict.

        If the order's session is closing or closed, it is re-assigned to a
        valid open session first. Keeps the session's sequence_number ahead
        of the order's, and records change given back as a negative cash
        payment.

        :return: the created pos.order id
        """
        session = self.pool.get('pos.session').browse(cr, uid, order['pos_session_id'], context=context)

        if session.state == 'closing_control' or session.state == 'closed':
            session_id = self._get_valid_session(cr, uid, order, context=context)
            session = self.pool.get('pos.session').browse(cr, uid, session_id, context=context)
            order['pos_session_id'] = session_id

        order_id = self.create(cr, uid, self._order_fields(cr, uid, order, context=context),context)
        journal_ids = set()
        for payments in order['statement_ids']:
            # payments is an ORM (0, 0, values) command tuple; [2] holds the values.
            self.add_payment(cr, uid, order_id, self._payment_fields(cr, uid, payments[2], context=context), context=context)
            journal_ids.add(payments[2]['journal_id'])

        if session.sequence_number <= order['sequence_number']:
            session.write({'sequence_number': order['sequence_number'] + 1})
            session.refresh()

        if not float_is_zero(order['amount_return'], self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')):
            cash_journal = session.cash_journal_id.id
            if not cash_journal:
                # Select for change one of the cash journals used in this payment
                cash_journal_ids = self.pool['account.journal'].search(cr, uid, [
                    ('type', '=', 'cash'),
                    ('id', 'in', list(journal_ids)),
                ], limit=1, context=context)
                if not cash_journal_ids:
                    # If none, select for change one of the cash journals of the POS
                    # This is used for example when a customer pays by credit card
                    # an amount higher than total amount of the order and gets cash back
                    cash_journal_ids = [statement.journal_id.id for statement in session.statement_ids
                                        if statement.journal_id.type == 'cash']
                    if not cash_journal_ids:
                        raise osv.except_osv( _('error!'),
                            _("No cash statement found for this session. Unable to record returned cash."))
                cash_journal = cash_journal_ids[0]
            # Book the change as a negative payment on the chosen cash journal.
            self.add_payment(cr, uid, order_id, {
                'amount': -order['amount_return'],
                'payment_date': time.strftime('%Y-%m-%d %H:%M:%S'),
                'payment_name': _('return'),
                'journal': cash_journal,
            }, context=context)
        return order_id
    def create_from_ui(self, cr, uid, orders, context=None):
        """Persist orders pushed from the POS frontend.

        Orders whose pos_reference already exists are skipped (frontend may
        retry submissions). Each new order is processed, signalled 'paid',
        and optionally invoiced.

        :param orders: list of {'data': ui_order_dict, 'to_invoice': bool}
        :return: list of created pos.order ids
        """
        # Keep only new orders
        submitted_references = [o['data']['name'] for o in orders]
        existing_order_ids = self.search(cr, uid, [('pos_reference', 'in', submitted_references)], context=context)
        existing_orders = self.read(cr, uid, existing_order_ids, ['pos_reference'], context=context)
        existing_references = set([o['pos_reference'] for o in existing_orders])
        orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]
        order_ids = []

        for tmp_order in orders_to_save:
            to_invoice = tmp_order['to_invoice']
            order = tmp_order['data']
            order_id = self._process_order(cr, uid, order, context=context)
            order_ids.append(order_id)

            try:
                self.signal_workflow(cr, uid, [order_id], 'paid')
            except psycopg2.OperationalError:
                # do not hide transactional errors, the order(s) won't be saved!
                raise
            except Exception as e:
                # Best-effort: log and keep the raw order rather than losing it.
                _logger.error('Could not fully process the POS Order: %s', tools.ustr(e))

            if to_invoice:
                self.action_invoice(cr, uid, [order_id], context)
                order_obj = self.browse(cr, uid, order_id, context)
                self.pool['account.invoice'].signal_workflow(cr, uid, [order_obj.invoice_id.id], 'invoice_open')

        return order_ids
    def write(self, cr, uid, ids, vals, context=None):
        """Standard write, with partner propagation.

        When partner_id changes, the partner of the order's bank statement
        lines is updated to the accounting partner; forbidden once an
        invoice exists for the order.
        """
        res = super(pos_order, self).write(cr, uid, ids, vals, context=context)
        #If you change the partner of the PoS order, change also the partner of the associated bank statement lines
        partner_obj = self.pool.get('res.partner')
        bsl_obj = self.pool.get("account.bank.statement.line")
        if 'partner_id' in vals:
            for posorder in self.browse(cr, uid, ids, context=context):
                if posorder.invoice_id:
                    raise osv.except_osv( _('Error!'), _("You cannot change the partner of a POS order for which an invoice has already been issued."))
                if vals['partner_id']:
                    p_id = partner_obj.browse(cr, uid, vals['partner_id'], context=context)
                    part_id = partner_obj._find_accounting_partner(p_id).id
                else:
                    part_id = False
                bsl_ids = [x.id for x in posorder.statement_ids]
                bsl_obj.write(cr, uid, bsl_ids, {'partner_id': part_id}, context=context)
        return res
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ('draft','cancel'):
raise osv.except_osv(_('Unable to Delete!'), _('In order to delete a sale, it must be new or cancelled.'))
return super(pos_order, self).unlink(cr, uid, ids, context=context)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
if not part:
return {'value': {}}
pricelist = self.pool.get('res.partner').browse(cr, uid, part, context=context).property_product_pricelist.id
return {'value': {'pricelist_id': pricelist}}
    def _amount_all(self, cr, uid, ids, name, args, context=None):
        """Function-field compute for amount_paid / amount_return /
        amount_tax / amount_total.

        Amounts are rounded in the order's pricelist currency; amount_return
        accumulates the negative payments (change given back).
        """
        cur_obj = self.pool.get('res.currency')
        res = {}
        for order in self.browse(cr, uid, ids, context=context):
            res[order.id] = {
                'amount_paid': 0.0,
                'amount_return':0.0,
                'amount_tax':0.0,
            }
            val1 = val2 = 0.0
            cur = order.pricelist_id.currency_id
            for payment in order.statement_ids:
                res[order.id]['amount_paid'] += payment.amount
                res[order.id]['amount_return'] += (payment.amount < 0 and payment.amount or 0)
            for line in order.lines:
                # val1 = taxes, val2 = untaxed subtotal
                val1 += self._amount_line_tax(cr, uid, line, context=context)
                val2 += line.price_subtotal
            res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val1)
            amount_untaxed = cur_obj.round(cr, uid, cur, val2)
            res[order.id]['amount_total'] = res[order.id]['amount_tax'] + amount_untaxed
        return res
_columns = {
'name': fields.char('Order Ref', required=True, readonly=True, copy=False),
'company_id':fields.many2one('res.company', 'Company', required=True, readonly=True),
'date_order': fields.datetime('Order Date', readonly=True, select=True),
'user_id': fields.many2one('res.users', 'Salesman', help="Person who uses the the cash register. It can be a reliever, a student or an interim employee."),
'amount_tax': fields.function(_amount_all, string='Taxes', digits_compute=dp.get_precision('Account'), multi='all'),
'amount_total': fields.function(_amount_all, string='Total', digits_compute=dp.get_precision('Account'), multi='all'),
'amount_paid': fields.function(_amount_all, string='Paid', states={'draft': [('readonly', False)]}, readonly=True, digits_compute=dp.get_precision('Account'), multi='all'),
'amount_return': fields.function(_amount_all, 'Returned', digits_compute=dp.get_precision('Account'), multi='all'),
'lines': fields.one2many('pos.order.line', 'order_id', 'Order Lines', states={'draft': [('readonly', False)]}, readonly=True, copy=True),
'statement_ids': fields.one2many('account.bank.statement.line', 'pos_statement_id', 'Payments', states={'draft': [('readonly', False)]}, readonly=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, states={'draft': [('readonly', False)]}, readonly=True),
'partner_id': fields.many2one('res.partner', 'Customer', change_default=True, select=1, states={'draft': [('readonly', False)], 'paid': [('readonly', False)]}),
'sequence_number': fields.integer('Sequence Number', help='A session-unique sequence number for the order'),
'session_id' : fields.many2one('pos.session', 'Session',
#required=True,
select=1,
domain="[('state', '=', 'opened')]",
states={'draft' : [('readonly', False)]},
readonly=True),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('paid', 'Paid'),
('done', 'Posted'),
('invoiced', 'Invoiced')],
'Status', readonly=True, copy=False),
'invoice_id': fields.many2one('account.invoice', 'Invoice', copy=False),
'account_move': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
'picking_id': fields.many2one('stock.picking', 'Picking', readonly=True, copy=False),
'picking_type_id': fields.related('session_id', 'config_id', 'picking_type_id', string="Picking Type", type='many2one', relation='stock.picking.type'),
'location_id': fields.related('session_id', 'config_id', 'stock_location_id', string="Location", type='many2one', store=True, relation='stock.location'),
'note': fields.text('Internal Notes'),
'nb_print': fields.integer('Number of Print', readonly=True, copy=False),
'pos_reference': fields.char('Receipt Ref', readonly=True, copy=False),
'sale_journal': fields.related('session_id', 'config_id', 'journal_id', relation='account.journal', type='many2one', string='Sale Journal', store=True, readonly=True),
}
def _default_session(self, cr, uid, context=None):
so = self.pool.get('pos.session')
session_ids = so.search(cr, uid, [('state','=', 'opened'), ('user_id','=',uid)], context=context)
return session_ids and session_ids[0] or False
def _default_pricelist(self, cr, uid, context=None):
session_ids = self._default_session(cr, uid, context)
if session_ids:
session_record = self.pool.get('pos.session').browse(cr, uid, session_ids, context=context)
return session_record.config_id.pricelist_id and session_record.config_id.pricelist_id.id or False
return False
def _get_out_picking_type(self, cr, uid, context=None):
return self.pool.get('ir.model.data').xmlid_to_res_id(
cr, uid, 'point_of_sale.picking_type_posout', context=context)
_defaults = {
'user_id': lambda self, cr, uid, context: uid,
'state': 'draft',
'name': '/',
'date_order': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'nb_print': 0,
'sequence_number': 1,
'session_id': _default_session,
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
'pricelist_id': _default_pricelist,
}
    def create(self, cr, uid, values, context=None):
        """Create the order, forcing its name from a sequence.

        The sequence configured on the session's pos.config wins; without a
        session, the generic 'pos.order' sequence is used.
        """
        if values.get('session_id'):
            # set name based on the sequence specified on the config
            session = self.pool['pos.session'].browse(cr, uid, values['session_id'], context=context)
            values['name'] = session.config_id.sequence_id._next()
        else:
            # fallback on any pos.order sequence
            values['name'] = self.pool.get('ir.sequence').get_id(cr, uid, 'pos.order', 'code', context=context)
        return super(pos_order, self).create(cr, uid, values, context=context)
def test_paid(self, cr, uid, ids, context=None):
"""A Point of Sale is paid when the sum
@return: True
"""
for order in self.browse(cr, uid, ids, context=context):
if order.lines and not order.amount_total:
return True
if (not order.lines) or (not order.statement_ids) or \
(abs(order.amount_total-order.amount_paid) > 0.00001):
return False
return True
    def create_picking(self, cr, uid, ids, context=None):
        """Create a picking for each order and validate it.

        Orders containing only service products are skipped. A picking is
        created when the order has a picking type; stock moves are created
        for every non-service line (negative quantities swap the source and
        destination locations, i.e. returns), then the picking (or the bare
        moves) is confirmed, force-assigned and done.
        """
        picking_obj = self.pool.get('stock.picking')
        partner_obj = self.pool.get('res.partner')
        move_obj = self.pool.get('stock.move')

        for order in self.browse(cr, uid, ids, context=context):
            if all(t == 'service' for t in order.lines.mapped('product_id.type')):
                continue
            addr = order.partner_id and partner_obj.address_get(cr, uid, [order.partner_id.id], ['delivery']) or {}
            picking_type = order.picking_type_id
            picking_id = False
            if picking_type:
                picking_id = picking_obj.create(cr, uid, {
                    'origin': order.name,
                    'partner_id': addr.get('delivery',False),
                    'date_done' : order.date_order,
                    'picking_type_id': picking_type.id,
                    'company_id': order.company_id.id,
                    'move_type': 'direct',
                    'note': order.note or "",
                    'invoice_state': 'none',
                }, context=context)
                self.write(cr, uid, [order.id], {'picking_id': picking_id}, context=context)
            location_id = order.location_id.id
            # Destination: the customer's stock location, else the picking
            # type's default, else the generic customer property.
            if order.partner_id:
                destination_id = order.partner_id.property_stock_customer.id
            elif picking_type:
                if not picking_type.default_location_dest_id:
                    raise osv.except_osv(_('Error!'), _('Missing source or destination location for picking type %s. Please configure those fields and try again.' % (picking_type.name,)))
                destination_id = picking_type.default_location_dest_id.id
            else:
                destination_id = partner_obj.default_get(cr, uid, ['property_stock_customer'], context=context)['property_stock_customer']

            move_list = []
            for line in order.lines:
                if line.product_id and line.product_id.type == 'service':
                    continue

                move_list.append(move_obj.create(cr, uid, {
                    'name': line.name,
                    'product_uom': line.product_id.uom_id.id,
                    'product_uos': line.product_id.uom_id.id,
                    'picking_id': picking_id,
                    'picking_type_id': picking_type.id,
                    'product_id': line.product_id.id,
                    'product_uos_qty': abs(line.qty),
                    'product_uom_qty': abs(line.qty),
                    'state': 'draft',
                    # Negative qty = return: flow goes back from the customer.
                    'location_id': location_id if line.qty >= 0 else destination_id,
                    'location_dest_id': destination_id if line.qty >= 0 else location_id,
                }, context=context))

            if picking_id:
                picking_obj.action_confirm(cr, uid, [picking_id], context=context)
                picking_obj.force_assign(cr, uid, [picking_id], context=context)
                picking_obj.action_done(cr, uid, [picking_id], context=context)
            elif move_list:
                move_obj.action_confirm(cr, uid, move_list, context=context)
                move_obj.force_assign(cr, uid, move_list, context=context)
                move_obj.action_done(cr, uid, move_list, context=context)
        return True
def cancel_order(self, cr, uid, ids, context=None):
""" Changes order state to cancel
@return: True
"""
stock_picking_obj = self.pool.get('stock.picking')
for order in self.browse(cr, uid, ids, context=context):
stock_picking_obj.action_cancel(cr, uid, [order.picking_id.id])
if stock_picking_obj.browse(cr, uid, order.picking_id.id, context=context).state <> 'cancel':
raise osv.except_osv(_('Error!'), _('Unable to cancel the picking.'))
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
    def add_payment(self, cr, uid, order_id, data, context=None):
        """Create a new payment for the order

        Creates an account.bank.statement.line on one of the session's open
        statements (matched either by the given statement_id or by journal)
        and links it to the order.

        :param data: dict with 'amount', optional 'payment_date',
            'payment_name', and 'journal' or 'statement_id'
        :return: the statement id the payment line was attached to
        :raises osv.except_osv: if no receivable account can be found or no
            matching open statement exists
        """
        context = dict(context or {})
        statement_line_obj = self.pool.get('account.bank.statement.line')
        property_obj = self.pool.get('ir.property')
        order = self.browse(cr, uid, order_id, context=context)
        date = data.get('payment_date', time.strftime('%Y-%m-%d'))
        if len(date) > 10:
            # A full datetime was passed: convert to the user's timezone and
            # keep only the date part.
            timestamp = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
            ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
            date = ts.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
        args = {
            'amount': data['amount'],
            'date': date,
            'name': order.name + ': ' + (data.get('payment_name', '') or ''),
            'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False,
        }

        journal_id = data.get('journal', False)
        statement_id = data.get('statement_id', False)
        assert journal_id or statement_id, "No statement_id or journal_id passed to the method!"

        journal = self.pool['account.journal'].browse(cr, uid, journal_id, context=context)
        # use the company of the journal and not of the current user
        company_cxt = dict(context, force_company=journal.company_id.id)
        account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=company_cxt)
        # Receivable account: the partner's own, else the company default.
        args['account_id'] = (order.partner_id and order.partner_id.property_account_receivable \
                             and order.partner_id.property_account_receivable.id) or (account_def and account_def.id) or False

        if not args['account_id']:
            if not args['partner_id']:
                msg = _('There is no receivable account defined to make payment.')
            else:
                msg = _('There is no receivable account defined to make payment for the partner: "%s" (id:%d).') % (order.partner_id.name, order.partner_id.id,)
            raise osv.except_osv(_('Configuration Error!'), msg)

        context.pop('pos_session_id', False)

        # Resolve the (statement, journal) pair from whichever side was given.
        for statement in order.session_id.statement_ids:
            if statement.id == statement_id:
                journal_id = statement.journal_id.id
                break
            elif statement.journal_id.id == journal_id:
                statement_id = statement.id
                break

        if not statement_id:
            raise osv.except_osv(_('Error!'), _('You have to open at least one cashbox.'))

        args.update({
            'statement_id': statement_id,
            'pos_statement_id': order_id,
            'journal_id': journal_id,
            'ref': order.session_id.name,
        })

        statement_line_obj.create(cr, uid, args, context=context)

        return statement_id
def refund(self, cr, uid, ids, context=None):
"""Create a copy of order for refund order"""
clone_list = []
line_obj = self.pool.get('pos.order.line')
for order in self.browse(cr, uid, ids, context=context):
current_session_ids = self.pool.get('pos.session').search(cr, uid, [
('state', '!=', 'closed'),
('user_id', '=', uid)], context=context)
if not current_session_ids:
raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.'))
clone_id = self.copy(cr, uid, order.id, {
'name': order.name + ' REFUND', # not used, name forced by create
'session_id': current_session_ids[0],
'date_order': time.strftime('%Y-%m-%d %H:%M:%S'),
}, context=context)
clone_list.append(clone_id)
for clone in self.browse(cr, uid, clone_list, context=context):
for order_line in clone.lines:
line_obj.write(cr, uid, [order_line.id], {
'qty': -order_line.qty
}, context=context)
abs = {
'name': _('Return Products'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.order',
'res_id':clone_list[0],
'view_id': False,
'context':context,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
}
return abs
def action_invoice_state(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state':'invoiced'}, context=context)
    def action_invoice(self, cr, uid, ids, context=None):
        """Create a customer invoice for each order and return an act_window
        on the first invoice.

        Orders already invoiced are kept as-is. Each invoice copies the
        order's partner, journal and currency; one invoice line is created
        per order line, with taxes reset afterwards. The order is signalled
        'invoice' and the invoice 'validate'.

        :raises osv.except_osv: if an order has no partner.
        """
        inv_ref = self.pool.get('account.invoice')
        inv_line_ref = self.pool.get('account.invoice.line')
        product_obj = self.pool.get('product.product')
        inv_ids = []

        for order in self.pool.get('pos.order').browse(cr, uid, ids, context=context):
            # Idempotence: reuse an existing invoice.
            if order.invoice_id:
                inv_ids.append(order.invoice_id.id)
                continue

            if not order.partner_id:
                raise osv.except_osv(_('Error!'), _('Please provide a partner for the sale.'))

            acc = order.partner_id.property_account_receivable.id
            inv = {
                'name': order.name,
                'origin': order.name,
                'account_id': acc,
                'journal_id': order.sale_journal.id or None,
                'type': 'out_invoice',
                'reference': order.name,
                'partner_id': order.partner_id.id,
                'comment': order.note or '',
                'currency_id': order.pricelist_id.currency_id.id, # considering partner's sale pricelist's currency
            }
            inv.update(inv_ref.onchange_partner_id(cr, uid, [], 'out_invoice', order.partner_id.id)['value'])
            # FORWARDPORT TO SAAS-6 ONLY!
            inv.update({'fiscal_position': False})
            if not inv.get('account_id', None):
                inv['account_id'] = acc
            inv_id = inv_ref.create(cr, uid, inv, context=context)

            self.write(cr, uid, [order.id], {'invoice_id': inv_id, 'state': 'invoiced'}, context=context)
            inv_ids.append(inv_id)
            for line in order.lines:
                inv_line = {
                    'invoice_id': inv_id,
                    'product_id': line.product_id.id,
                    'quantity': line.qty,
                }
                inv_name = product_obj.name_get(cr, uid, [line.product_id.id], context=context)[0][1]
                # Let the standard onchange fill account, taxes, uos...
                inv_line.update(inv_line_ref.product_id_change(cr, uid, [],
                                                               line.product_id.id,
                                                               line.product_id.uom_id.id,
                                                               line.qty, partner_id = order.partner_id.id)['value'])
                if not inv_line.get('account_analytic_id', False):
                    inv_line['account_analytic_id'] = \
                        self._prepare_analytic_account(cr, uid, line,
                                                       context=context)
                inv_line['price_unit'] = line.price_unit
                inv_line['discount'] = line.discount
                inv_line['name'] = inv_name
                inv_line['invoice_line_tax_id'] = [(6, 0, inv_line['invoice_line_tax_id'])]
                inv_line_ref.create(cr, uid, inv_line, context=context)
            inv_ref.button_reset_taxes(cr, uid, [inv_id], context=context)
            self.signal_workflow(cr, uid, [order.id], 'invoice')
            inv_ref.signal_workflow(cr, uid, [inv_id], 'validate')

        if not inv_ids: return {}

        mod_obj = self.pool.get('ir.model.data')
        res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
        res_id = res and res[1] or False
        return {
            'name': _('Customer Invoice'),
            'view_type': 'form',
            'view_mode': 'form',
            'view_id': [res_id],
            'res_model': 'account.invoice',
            'context': "{'type':'out_invoice'}",
            'type': 'ir.actions.act_window',
            'nodestroy': True,
            'target': 'current',
            'res_id': inv_ids and inv_ids[0] or False,
        }
def create_account_move(self, cr, uid, ids, context=None):
return self._create_account_move_line(cr, uid, ids, None, None, context=context)
def _prepare_analytic_account(self, cr, uid, line, context=None):
'''This method is designed to be inherited in a custom module'''
return False
    def _create_account_move(self, cr, uid, dt, ref, journal_id, company_id, context=None):
        """Create an empty account.move in the period matching `dt`.

        :param dt: server-format datetime string used to pick the period
            (converted to the user's timezone first)
        :return: the created account.move id
        """
        local_context = dict(context or {}, company_id=company_id)
        start_at_datetime = datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
        date_tz_user = fields.datetime.context_timestamp(cr, uid, start_at_datetime, context=context)
        date_tz_user = date_tz_user.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
        # find() respects company_id from local_context when resolving the period
        period_id = self.pool['account.period'].find(cr, uid, dt=date_tz_user, context=local_context)
        return self.pool['account.move'].create(cr, uid, {'ref': ref, 'journal_id': journal_id, 'period_id': period_id[0]}, context=context)
def _create_account_move_line(self, cr, uid, ids, session=None, move_id=None, context=None):
    # Tricky: via the workflow we only receive one id in the ids variable.
    """Create the account move lines for the given paid POS orders.

    For every order in ``ids`` (skipping orders already posted or not in
    the ``paid`` state) this builds one journal item per order line, per
    tax and one counterpart receivable line, all attached to a single
    account.move — either the ``move_id`` given by the caller or a move
    created here from the order's session/journal.  When the session's
    POS configuration asks for it (``config_id.group_by``), lines sharing
    the same grouping key are merged into a single journal item.

    :param session: optional pos.session; when given, every order must
                    belong to it, otherwise an error is raised.
    :param move_id: optional existing account.move id to write into.
    :return: True
    :raises osv.except_osv: if orders span several sessions, or a product
                            has no income account configured.
    """
    account_move_obj = self.pool.get('account.move')
    account_period_obj = self.pool.get('account.period')
    account_tax_obj = self.pool.get('account.tax')
    property_obj = self.pool.get('ir.property')
    cur_obj = self.pool.get('res.currency')
    # All selected orders must belong to the session being closed (if any).
    if session and not all(session.id == order.session_id.id for order in self.browse(cr, uid, ids, context=context)):
        raise osv.except_osv(_('Error!'), _('Selected orders do not have the same session!'))
    # Accumulates journal-item values keyed by a grouping tuple; shared by
    # every order so grouped sessions produce one consolidated move.
    grouped_data = {}
    have_to_group_by = session and session.config_id.group_by or False

    def compute_tax(amount, tax, line):
        # Sales use the regular base code/sign; refunds (amount <= 0) use
        # the dedicated refund code/sign of the tax definition.
        if amount > 0:
            tax_code_id = tax['base_code_id']
            tax_amount = line.price_subtotal * tax['base_sign']
        else:
            tax_code_id = tax['ref_base_code_id']
            tax_amount = abs(line.price_subtotal) * tax['ref_base_sign']
        return (tax_code_id, tax_amount,)

    for order in self.browse(cr, uid, ids, context=context):
        if order.account_move:
            # Already posted: nothing to do for this order.
            continue
        if order.state != 'paid':
            continue
        current_company = order.sale_journal.company_id
        # Tax totals per (tax code, base code, account, tax id) tuple.
        group_tax = {}
        account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=context)
        # Receivable account: partner-specific one if set, else the
        # company default from ir.property.
        order_account = order.partner_id and \
                        order.partner_id.property_account_receivable and \
                        order.partner_id.property_account_receivable.id or \
                        account_def and account_def.id
        if move_id is None:
            # Create an entry for the sale
            move_id = self._create_account_move(cr, uid, order.session_id.start_at, order.name, order.sale_journal.id, order.company_id.id, context=context)
        move = account_move_obj.browse(cr, uid, move_id, context=context)

        def insert_data(data_type, values):
            # Complete ``values`` with the order-level fields and stash it
            # into grouped_data, merging with an existing compatible line
            # when grouping is enabled.
            sale_journal_id = order.sale_journal.id
            values.update({
                'date': order.date_order[:10],
                'ref': order.name,
                'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False,
                'journal_id' : sale_journal_id,
                'period_id': move.period_id.id,
                'move_id' : move_id,
                'company_id': current_company.id,
            })
            if data_type == 'product':
                key = ('product', values['partner_id'], values['product_id'], values['analytic_account_id'], values['debit'] > 0)
            elif data_type == 'tax':
                key = ('tax', values['partner_id'], values['tax_code_id'], values['debit'] > 0)
            elif data_type == 'counter_part':
                key = ('counter_part', values['partner_id'], values['account_id'], values['debit'] > 0)
            else:
                # Unknown data_type: silently ignored.
                return
            grouped_data.setdefault(key, [])
            if have_to_group_by:
                if not grouped_data[key]:
                    grouped_data[key].append(values)
                else:
                    # Merge amounts into the first line with the same tax
                    # code; fall through (for/else) to a new line if none.
                    for line in grouped_data[key]:
                        if line.get('tax_code_id') == values.get('tax_code_id'):
                            current_value = line
                            current_value['quantity'] = current_value.get('quantity', 0.0) + values.get('quantity', 0.0)
                            current_value['credit'] = current_value.get('credit', 0.0) + values.get('credit', 0.0)
                            current_value['debit'] = current_value.get('debit', 0.0) + values.get('debit', 0.0)
                            current_value['tax_amount'] = current_value.get('tax_amount', 0.0) + values.get('tax_amount', 0.0)
                            break
                    else:
                        grouped_data[key].append(values)
            else:
                grouped_data[key].append(values)

        #because of the weird way the pos order is written, we need to make sure there is at least one line,
        #because just after the 'for' loop there are references to 'line' and 'income_account' variables (that
        #are set inside the for loop)
        #TOFIX: a deep refactoring of this method (and class!) is needed in order to get rid of this stupid hack
        assert order.lines, _('The POS order must have lines when calling this method')
        # Create a move line for each order line.
        cur = order.pricelist_id.currency_id
        round_per_line = True
        if order.company_id.tax_calculation_rounding_method == 'round_globally':
            round_per_line = False
        for line in order.lines:
            tax_amount = 0
            # Only taxes of the journal's company apply.
            taxes = []
            for t in line.product_id.taxes_id:
                if t.company_id.id == current_company.id:
                    taxes.append(t)
            computed_taxes = account_tax_obj.compute_all(cr, uid, taxes, line.price_unit * (100.0-line.discount) / 100.0, line.qty)['taxes']
            for tax in computed_taxes:
                tax_amount += cur_obj.round(cr, uid, cur, tax['amount']) if round_per_line else tax['amount']
                if tax_amount < 0:
                    group_key = (tax['ref_tax_code_id'], tax['base_code_id'], tax['account_collected_id'], tax['id'])
                else:
                    group_key = (tax['tax_code_id'], tax['base_code_id'], tax['account_collected_id'], tax['id'])
                group_tax.setdefault(group_key, 0)
                group_tax[group_key] += cur_obj.round(cr, uid, cur, tax['amount']) if round_per_line else tax['amount']
            amount = line.price_subtotal
            # Search for the income account: product first, then category.
            if line.product_id.property_account_income.id:
                income_account = line.product_id.property_account_income.id
            elif line.product_id.categ_id.property_account_income_categ.id:
                income_account = line.product_id.categ_id.property_account_income_categ.id
            else:
                raise osv.except_osv(_('Error!'), _('Please define income '\
                    'account for this product: "%s" (id:%d).') \
                    % (line.product_id.name, line.product_id.id, ))
            # Pop taxes off the list until one with a base tax code is
            # found; that code/amount goes on the product line itself.
            tax_code_id = False
            tax_amount = 0
            while computed_taxes:
                tax = computed_taxes.pop(0)
                tax_code_id, tax_amount = compute_tax(amount, tax, line)
                # If there is one we stop
                if tax_code_id:
                    break
            # Create a move line for the order line.
            insert_data('product', {
                'name': line.product_id.name,
                'quantity': line.qty,
                'product_id': line.product_id.id,
                'account_id': income_account,
                'analytic_account_id': self._prepare_analytic_account(cr, uid, line, context=context),
                'credit': ((amount>0) and amount) or 0.0,
                'debit': ((amount<0) and -amount) or 0.0,
                'tax_code_id': tax_code_id,
                'tax_amount': tax_amount,
                'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
            })
            # For each remaining tax with a base code, create a zero-amount
            # move line carrying only the tax base information.
            for tax in computed_taxes:
                tax_code_id, tax_amount = compute_tax(amount, tax, line)
                if not tax_code_id:
                    continue
                insert_data('tax', {
                    'name': _('Tax'),
                    'product_id':line.product_id.id,
                    'quantity': line.qty,
                    'account_id': income_account,
                    'credit': 0.0,
                    'debit': 0.0,
                    'tax_code_id': tax_code_id,
                    'tax_amount': tax_amount,
                    'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
                })
        # Create one move line per accumulated tax group.
        # NOTE: 'line' and 'income_account' leak from the loop above — see
        # the assert/hack comment before it.
        (tax_code_pos, base_code_pos, account_pos, tax_id)= (0, 1, 2, 3)
        for key, tax_amount in group_tax.items():
            tax = self.pool.get('account.tax').browse(cr, uid, key[tax_id], context=context)
            insert_data('tax', {
                'name': _('Tax') + ' ' + tax.name,
                'quantity': line.qty,
                'product_id': line.product_id.id,
                'account_id': key[account_pos] or income_account,
                'credit': ((tax_amount>0) and tax_amount) or 0.0,
                'debit': ((tax_amount<0) and -tax_amount) or 0.0,
                'tax_code_id': key[tax_code_pos],
                'tax_amount': abs(tax_amount) * tax.tax_sign if tax_amount>=0 else abs(tax_amount) * tax.ref_tax_sign,
                'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
            })
        # Counterpart: one receivable line balancing the whole order.
        insert_data('counter_part', {
            'name': _("Trade Receivables"), #order.name,
            'account_id': order_account,
            'credit': ((order.amount_total < 0) and -order.amount_total) or 0.0,
            'debit': ((order.amount_total > 0) and order.amount_total) or 0.0,
            'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
        })
        order.write({'state':'done', 'account_move': move_id})
    # Flush everything accumulated in grouped_data onto the move as
    # one2many create commands (0, 0, values).
    all_lines = []
    for group_key, group_data in grouped_data.iteritems():
        for value in group_data:
            all_lines.append((0, 0, value),)
    if move_id: #In case no order was changed
        self.pool.get("account.move").write(cr, uid, [move_id], {'line_id':all_lines}, context=context)
    return True
def action_payment(self, cr, uid, ids, context=None):
    """Workflow hook: move the given orders into the 'payment' state."""
    vals = {'state': 'payment'}
    return self.write(cr, uid, ids, vals, context=context)
def action_paid(self, cr, uid, ids, context=None):
    """Workflow hook: flag the orders as paid, then create their pickings."""
    paid_vals = {'state': 'paid'}
    self.write(cr, uid, ids, paid_vals, context=context)
    # The stock picking is generated as part of the same transition.
    self.create_picking(cr, uid, ids, context=context)
    return True
def action_cancel(self, cr, uid, ids, context=None):
    """Workflow hook: cancel the given orders."""
    cancel_vals = {'state': 'cancel'}
    self.write(cr, uid, ids, cancel_vals, context=context)
    return True
def action_done(self, cr, uid, ids, context=None):
    """Workflow hook: post the accounting entries for the orders."""
    # Posting the move also flips the orders to the 'done' state.
    self.create_account_move(cr, uid, ids, context=context)
    return True
class account_bank_statement(osv.osv):
    """Extend bank statements with the user that created them from the POS."""
    _inherit = 'account.bank.statement'
    _columns= {
        # User responsible for the statement; filled automatically, hence
        # read-only in the UI.
        'user_id': fields.many2one('res.users', 'User', readonly=True),
    }
    _defaults = {
        # Default to the user creating the record.
        'user_id': lambda self,cr,uid,c={}: uid
    }
class account_bank_statement_line(osv.osv):
    """Link statement lines back to the POS order that produced them."""
    _inherit = 'account.bank.statement.line'
    _columns= {
        # Deleting the POS order removes its payment lines as well.
        # NOTE(review): no explicit field label is given here.
        'pos_statement_id': fields.many2one('pos.order', ondelete='cascade'),
    }
class pos_order_line(osv.osv):
    """A single product line of a point-of-sale order."""
    _name = "pos.order.line"
    _description = "Lines of Point of Sale"
    _rec_name = "product_id"

    def _amount_line_all(self, cr, uid, ids, field_names, arg, context=None):
        """Function-field computation: tax-excluded and tax-included
        subtotals of each line, applying the line discount and the taxes
        of the order's company."""
        res = dict([(i, {}) for i in ids])
        account_tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')  # NOTE(review): unused in this method
        for line in self.browse(cr, uid, ids, context=context):
            # Keep only taxes belonging to the order's company.
            taxes_ids = [ tax for tax in line.product_id.taxes_id if tax.company_id.id == line.order_id.company_id.id ]
            price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
            taxes = account_tax_obj.compute_all(cr, uid, taxes_ids, price, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
            cur = line.order_id.pricelist_id.currency_id  # NOTE(review): unused
            res[line.id]['price_subtotal'] = taxes['total']
            res[line.id]['price_subtotal_incl'] = taxes['total_included']
        return res

    def onchange_product_id(self, cr, uid, ids, pricelist, product_id, qty=0, partner_id=False, context=None):
        """On product change, fetch the pricelist price and recompute the
        line subtotals.  Raises if no pricelist is selected yet."""
        context = context or {}
        if not product_id:
            return {}
        if not pricelist:
            raise osv.except_osv(_('No Pricelist!'),
                _('You have to select a pricelist in the sale form !\n' \
                'Please set one before choosing a product.'))
        price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
               product_id, qty or 1.0, partner_id)[pricelist]
        # Reuse the qty onchange to fill in the subtotal values.
        result = self.onchange_qty(cr, uid, ids, product_id, 0.0, qty, price, context=context)
        result['value']['price_unit'] = price
        return result

    def onchange_qty(self, cr, uid, ids, product, discount, qty, price_unit, context=None):
        """Recompute the (tax-excluded / tax-included) subtotals when the
        quantity, discount or unit price changes."""
        result = {}
        if not product:
            return result
        account_tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')  # NOTE(review): unused
        prod = self.pool.get('product.product').browse(cr, uid, product, context=context)
        price = price_unit * (1 - (discount or 0.0) / 100.0)
        taxes = account_tax_obj.compute_all(cr, uid, prod.taxes_id, price, qty, product=prod, partner=False)
        result['price_subtotal'] = taxes['total']
        result['price_subtotal_incl'] = taxes['total_included']
        return {'value': result}

    _columns = {
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'name': fields.char('Line No', required=True, copy=False),
        'notice': fields.char('Discount Notice'),
        'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], required=True, change_default=True),
        'price_unit': fields.float(string='Unit Price', digits_compute=dp.get_precision('Product Price')),
        'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product UoS')),
        'price_subtotal': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Product Price'), string='Subtotal w/o Tax', store=True),
        'price_subtotal_incl': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Account'), string='Subtotal', store=True),
        'discount': fields.float('Discount (%)', digits_compute=dp.get_precision('Account')),
        'order_id': fields.many2one('pos.order', 'Order Ref', ondelete='cascade'),
        'create_date': fields.datetime('Creation Date', readonly=True),
    }

    _defaults = {
        # Line number comes from a dedicated ir.sequence.
        'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'pos.order.line', context=context),
        'qty': lambda *a: 1,
        'discount': lambda *a: 0.0,
        'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
    }
class ean_wizard(osv.osv_memory):
    """Transient wizard turning a partial reference into a valid EAN13
    barcode on the record the wizard was opened from."""
    _name = 'pos.ean_wizard'
    _columns = {
        'ean13_pattern': fields.char('Reference', size=13, required=True, translate=True),
    }

    def sanitize_ean13(self, cr, uid, ids, context):
        """Normalize the entered pattern into a valid EAN13 (checksum
        recomputed by the product module helper) and write it on the
        active record, then close the wizard window."""
        for r in self.browse(cr,uid,ids):
            ean13 = openerp.addons.product.product.sanitize_ean13(r.ean13_pattern)
            # Target model/record come from the action context.
            m = context.get('active_model')
            m_id = context.get('active_id')
            self.pool[m].write(cr,uid,[m_id],{'ean13':ean13})
        return { 'type' : 'ir.actions.act_window_close' }
class pos_category(osv.osv):
    """Public product category used to group products on the POS screens.

    Categories form a tree (parent/child) and carry an optional image
    that is resized into medium/small thumbnails for the touchscreen UI.
    """
    _name = "pos.category"
    _description = "Public Category"
    _order = "sequence, name"

    _constraints = [
        # Prevent a category from being (indirectly) its own parent.
        (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
    ]

    def name_get(self, cr, uid, ids, context=None):
        """Return display names as the full ' / '-separated path from the
        root category down to each record."""
        res = []
        for cat in self.browse(cr, uid, ids, context=context):
            names = [cat.name]
            pcat = cat.parent_id
            # Walk up to the root, collecting ancestor names.
            while pcat:
                names.append(pcat.name)
                pcat = pcat.parent_id
            res.append((cat.id, ' / '.join(reversed(names))))
        return res

    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        """Function-field wrapper exposing name_get() as 'complete_name'."""
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)

    def _get_image(self, cr, uid, ids, name, args, context=None):
        """Compute the resized thumbnail variants of the category image."""
        result = dict.fromkeys(ids, False)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = tools.image_get_resized_images(obj.image)
        return result

    def _set_image(self, cr, uid, id, name, value, args, context=None):
        """Store a (possibly resized-down) big version of the new image;
        the thumbnails are recomputed from it."""
        return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)

    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
        'parent_id': fields.many2one('pos.category','Parent Category', select=True),
        'child_id': fields.one2many('pos.category', 'parent_id', string='Children Categories'),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),

        # NOTE: there is no 'default image', because by default we don't show thumbnails for categories. However if we have a thumbnail
        # for at least one category, then we display a default image on the other, so that the buttons have consistent styling.
        # In this case, the default image is set by the js code.
        # NOTE2: image: all image fields are base64 encoded and PIL-supported
        'image': fields.binary("Image",
            help="This field holds the image used as image for the category, limited to 1024x1024px."),
        'image_medium': fields.function(_get_image, fnct_inv=_set_image,
            string="Medium-sized image", type="binary", multi="_get_image",
            store={
                'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Medium-sized image of the category. It is automatically "\
                 "resized as a 128x128px image, with aspect ratio preserved. "\
                 "Use this field in form views or some kanban views."),
        'image_small': fields.function(_get_image, fnct_inv=_set_image,
            string="Small-sized image", type="binary", multi="_get_image",
            store={
                'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Small-sized image of the category. It is automatically "\
                 "resized as a 64x64px image, with aspect ratio preserved. "\
                 "Use this field anywhere a small image is required."),
    }
class product_template(osv.osv):
    """Extend product templates with the point-of-sale related flags."""
    _inherit = 'product.template'
    _columns = {
        'income_pdt': fields.boolean('Point of Sale Cash In', help="Check if, this is a product you can use to put cash into a statement for the point of sale backend."),
        'expense_pdt': fields.boolean('Point of Sale Cash Out', help="Check if, this is a product you can use to take cash from a statement for the point of sale backend, example: money lost, transfer to bank, etc."),
        'available_in_pos': fields.boolean('Available in the Point of Sale', help='Check if you want this product to appear in the Point of Sale'),
        'to_weight' : fields.boolean('To Weigh With Scale', help="Check if the product should be weighted using the hardware scale integration"),
        'pos_categ_id': fields.many2one('pos.category','Point of Sale Category', help="Those categories are used to group similar products for point of sale."),
    }

    _defaults = {
        'to_weight' : False,
        'available_in_pos': True,
    }

    def unlink(self, cr, uid, ids, context=None):
        """Forbid deleting a POS-saleable product while any POS session is
        still open (the running UIs may reference it)."""
        # active_test=False so archived-but-POS-available products count too.
        product_ctx = dict(context or {}, active_test=False)
        if self.search_count(cr, uid, [('id', 'in', ids), ('available_in_pos', '=', True)], context=product_ctx):
            if self.pool['pos.session'].search_count(cr, uid, [('state', '!=', 'closed')], context=context):
                raise osv.except_osv(_('Error!'),
                    _('You cannot delete a product saleable in point of sale while a session is still opened.'))
        return super(product_template, self).unlink(cr, uid, ids, context=context)
class res_partner(osv.osv):
    """POS-side helper on partners: create/update records sent by the UI."""
    _inherit = 'res.partner'

    def create_from_ui(self, cr, uid, partner, context=None):
        """ create or modify a partner from the point of sale ui.
            partner contains the partner's fields. """
        # The UI sends the image as a data URL; keep only the base64 payload
        # after the comma.
        image_dataurl = partner.get('image', False)
        if image_dataurl:
            partner['image'] = image_dataurl.split(',')[1]
        if partner.get('id', False):
            # An id means we are updating an existing partner; the id itself
            # must not be part of the written values.
            partner_id = partner.pop('id')
            self.write(cr, uid, [partner_id], partner, context=context)
        else:
            partner_id = self.create(cr, uid, partner, context=context)
        return partner_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jpshort/odoo | refs/heads/8.0 | addons/l10n_ch/__init__.py | 424 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zhukaixy/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_asyncio/test_locks.py | 80 | """Tests for lock.py"""
import unittest
from unittest import mock
import re
import asyncio
from asyncio import test_utils
# Expected shape of the repr() of the synchronization primitives:
#   <ClassName object at 0xADDRESS [EXTRAS]>
# where EXTRAS starts with one of set/unset/locked/unlocked and may be
# followed by ",value:N" (semaphores) and/or ",waiters:N".
STR_RGX_REPR = (
    r'^<(?P<class>.*?) object at (?P<address>.*?)'
    r'\[(?P<extras>'
    r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
    r')\]>\Z'
)
# Compiled once; reused by every test class below.
RGX_REPR = re.compile(STR_RGX_REPR)
class LockTests(test_utils.TestCase):
    """Tests for asyncio.Lock: construction, acquire/release ordering,
    cancellation behaviour and the 'with (yield from lock)' protocol."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        lock = asyncio.Lock(loop=loop)
        self.assertIs(lock._loop, loop)

        lock = asyncio.Lock(loop=self.loop)
        self.assertIs(lock._loop, self.loop)

    def test_ctor_noloop(self):
        # Without an explicit loop argument the current event loop is used.
        asyncio.set_event_loop(self.loop)
        lock = asyncio.Lock()
        self.assertIs(lock._loop, self.loop)

    def test_repr(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(repr(lock).endswith('[unlocked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

        @asyncio.coroutine
        def acquire_lock():
            yield from lock

        self.loop.run_until_complete(acquire_lock())
        self.assertTrue(repr(lock).endswith('[locked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

    def test_lock(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_acquire(self):
        # Waiters must be released strictly in FIFO order.
        lock = asyncio.Lock(loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        @asyncio.coroutine
        def c1(result):
            if (yield from lock.acquire()):
                result.append(1)
                return True

        @asyncio.coroutine
        def c2(result):
            if (yield from lock.acquire()):
                result.append(2)
                return True

        @asyncio.coroutine
        def c3(result):
            if (yield from lock.acquire()):
                result.append(3)
                return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        t3 = asyncio.Task(c3(result), loop=self.loop)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_acquire_cancel(self):
        # A cancelled acquire() must not leave a stale waiter behind.
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        task = asyncio.Task(lock.acquire(), loop=self.loop)
        self.loop.call_soon(task.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, task)
        self.assertFalse(lock._waiters)

    def test_cancel_race(self):
        # Several tasks:
        # - A acquires the lock
        # - B is blocked in aqcuire()
        # - C is blocked in aqcuire()
        #
        # Now, concurrently:
        # - B is cancelled
        # - A releases the lock
        #
        # If B's waiter is marked cancelled but not yet removed from
        # _waiters, A's release() call will crash when trying to set
        # B's waiter; instead, it should move on to C's waiter.

        # Setup: A has the lock, b and c are waiting.
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def lockit(name, blocker):
            yield from lock.acquire()
            try:
                if blocker is not None:
                    yield from blocker
            finally:
                lock.release()

        fa = asyncio.Future(loop=self.loop)
        ta = asyncio.Task(lockit('A', fa), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertTrue(lock.locked())
        tb = asyncio.Task(lockit('B', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 1)
        tc = asyncio.Task(lockit('C', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 2)

        # Create the race and check.
        # Without the fix this failed at the last assert.
        fa.set_result(None)
        tb.cancel()
        self.assertTrue(lock._waiters[0].cancelled())
        test_utils.run_briefly(self.loop)
        self.assertFalse(lock.locked())
        self.assertTrue(ta.done())
        self.assertTrue(tb.cancelled())
        self.assertTrue(tc.done())

    def test_release_not_acquired(self):
        lock = asyncio.Lock(loop=self.loop)

        self.assertRaises(RuntimeError, lock.release)

    def test_release_no_waiters(self):
        lock = asyncio.Lock(loop=self.loop)
        self.loop.run_until_complete(lock.acquire())
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_context_manager(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        with self.loop.run_until_complete(acquire_lock()):
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

    def test_context_manager_cant_reuse(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        # This spells "yield from lock" outside a generator.
        cm = self.loop.run_until_complete(acquire_lock())
        with cm:
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

        # The context-manager object is single-use.
        with self.assertRaises(AttributeError):
            with cm:
                pass

    def test_context_manager_no_yield(self):
        lock = asyncio.Lock(loop=self.loop)

        try:
            with lock:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(lock.locked())
class EventTests(test_utils.TestCase):
    """Tests for asyncio.Event: set/clear/wait semantics and repr()."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        ev = asyncio.Event(loop=loop)
        self.assertIs(ev._loop, loop)

        ev = asyncio.Event(loop=self.loop)
        self.assertIs(ev._loop, self.loop)

    def test_ctor_noloop(self):
        # Without an explicit loop argument the current event loop is used.
        asyncio.set_event_loop(self.loop)
        ev = asyncio.Event()
        self.assertIs(ev._loop, self.loop)

    def test_repr(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertTrue(repr(ev).endswith('[unset]>'))
        match = RGX_REPR.match(repr(ev))
        self.assertEqual(match.group('extras'), 'unset')

        ev.set()
        self.assertTrue(repr(ev).endswith('[set]>'))
        self.assertTrue(RGX_REPR.match(repr(ev)))

        ev._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(ev))
        self.assertTrue(RGX_REPR.match(repr(ev)))

    def test_wait(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)

        @asyncio.coroutine
        def c2(result):
            if (yield from ev.wait()):
                result.append(2)

        @asyncio.coroutine
        def c3(result):
            if (yield from ev.wait()):
                result.append(3)

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # c3 is started after set() below, so it finishes first.
        t3 = asyncio.Task(c3(result), loop=self.loop)

        ev.set()
        test_utils.run_briefly(self.loop)
        self.assertEqual([3, 1, 2], result)

        self.assertTrue(t1.done())
        self.assertIsNone(t1.result())
        self.assertTrue(t2.done())
        self.assertIsNone(t2.result())
        self.assertTrue(t3.done())
        self.assertIsNone(t3.result())

    def test_wait_on_set(self):
        # wait() on an already-set event returns immediately.
        ev = asyncio.Event(loop=self.loop)
        ev.set()

        res = self.loop.run_until_complete(ev.wait())
        self.assertTrue(res)

    def test_wait_cancel(self):
        # A cancelled wait() must not leave a stale waiter behind.
        ev = asyncio.Event(loop=self.loop)

        wait = asyncio.Task(ev.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(ev._waiters)

    def test_clear(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        ev.set()
        self.assertTrue(ev.is_set())

        ev.clear()
        self.assertFalse(ev.is_set())

    def test_clear_with_waiters(self):
        # set() followed by clear() before the waiter runs: the waiter
        # stays queued and is only woken by the next set().
        ev = asyncio.Event(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)
            return True

        t = asyncio.Task(c1(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        ev.set()
        ev.clear()
        self.assertFalse(ev.is_set())

        ev.set()
        ev.set()
        self.assertEqual(1, len(ev._waiters))

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertEqual(0, len(ev._waiters))

        self.assertTrue(t.done())
        self.assertTrue(t.result())
class ConditionTests(test_utils.TestCase):
    """Tests for asyncio.Condition: wait/notify semantics, lock ownership
    requirements, repr() and the context-manager protocol."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        cond = asyncio.Condition(loop=loop)
        self.assertIs(cond._loop, loop)

        cond = asyncio.Condition(loop=self.loop)
        self.assertIs(cond._loop, self.loop)

    def test_ctor_noloop(self):
        # Without an explicit loop argument the current event loop is used.
        asyncio.set_event_loop(self.loop)
        cond = asyncio.Condition()
        self.assertIs(cond._loop, self.loop)

    def test_wait(self):
        # Waiters are woken in FIFO order, one per release of the lock.
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
                return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertFalse(cond.locked())

        self.assertTrue(self.loop.run_until_complete(cond.acquire()))
        cond.notify()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.notify(2)
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(cond.locked())

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_wait_cancel(self):
        # Cancelling wait() must clean up the waiter and re-acquire the lock.
        cond = asyncio.Condition(loop=self.loop)
        self.loop.run_until_complete(cond.acquire())

        wait = asyncio.Task(cond.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(cond._waiters)
        self.assertTrue(cond.locked())

    def test_wait_unacquired(self):
        # wait() without holding the lock is an error.
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, cond.wait())

    def test_wait_for(self):
        # wait_for() keeps waiting until the predicate becomes true.
        cond = asyncio.Condition(loop=self.loop)
        presult = False

        def predicate():
            return presult

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait_for(predicate)):
                result.append(1)
                cond.release()
                return True

        t = asyncio.Task(c1(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # Notify while the predicate is still false: no progress.
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        presult = True
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.assertTrue(t.done())
        self.assertTrue(t.result())

    def test_wait_for_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)

        # predicate can return true immediately
        res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3]))
        self.assertEqual([1, 2, 3], res)

        # But a false predicate forces a wait(), which needs the lock.
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete,
            cond.wait_for(lambda: False))

    def test_notify(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
                return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
                return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
                cond.release()
                return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        # notify(n) with n larger than the number of waiters wakes them all.
        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.notify(2048)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_notify_all(self):
        cond = asyncio.Condition(loop=self.loop)

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
                return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
                return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify_all()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())

    def test_notify_unacquired(self):
        # notify() without holding the lock is an error.
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify)

    def test_notify_all_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify_all)

    def test_repr(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertTrue('unlocked' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        self.loop.run_until_complete(cond.acquire())
        self.assertTrue('locked' in repr(cond))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

    def test_context_manager(self):
        cond = asyncio.Condition(loop=self.loop)

        @asyncio.coroutine
        def acquire_cond():
            return (yield from cond)

        with self.loop.run_until_complete(acquire_cond()):
            self.assertTrue(cond.locked())

        self.assertFalse(cond.locked())

    def test_context_manager_no_yield(self):
        cond = asyncio.Condition(loop=self.loop)

        try:
            with cond:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(cond.locked())

    def test_explicit_lock(self):
        # A Condition can share a user-provided Lock (and adopts its loop).
        lock = asyncio.Lock(loop=self.loop)
        cond = asyncio.Condition(lock, loop=self.loop)

        self.assertIs(cond._lock, lock)
        self.assertIs(cond._loop, lock._loop)

    def test_ambiguous_loops(self):
        # Passing a lock bound to a different loop is rejected.
        loop = self.new_test_loop()
        self.addCleanup(loop.close)

        lock = asyncio.Lock(loop=self.loop)

        with self.assertRaises(ValueError):
            asyncio.Condition(lock, loop=loop)
class SemaphoreTests(test_utils.TestCase):
    """Tests for asyncio.Semaphore / BoundedSemaphore.

    Note: several tests below rely on the exact scheduling order produced
    by ``test_utils.run_briefly`` — the statement order is significant.
    """
    def setUp(self):
        self.loop = self.new_test_loop()
    def test_ctor_loop(self):
        # An explicitly passed loop (mock or real) is stored as-is.
        loop = mock.Mock()
        sem = asyncio.Semaphore(loop=loop)
        self.assertIs(sem._loop, loop)
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertIs(sem._loop, self.loop)
    def test_ctor_noloop(self):
        # Without an explicit loop the current event loop is picked up.
        asyncio.set_event_loop(self.loop)
        sem = asyncio.Semaphore()
        self.assertIs(sem._loop, self.loop)
    def test_initial_value_zero(self):
        # A semaphore created with value 0 starts out locked.
        sem = asyncio.Semaphore(0, loop=self.loop)
        self.assertTrue(sem.locked())
    def test_repr(self):
        # repr() reflects locked state, current value and waiter count.
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
        self.assertTrue(RGX_REPR.match(repr(sem)))
        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(repr(sem).endswith('[locked]>'))
        self.assertTrue('waiters' not in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))
        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))
        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))
    def test_semaphore(self):
        # ``yield from sem`` acquires and returns a releasing context manager;
        # the internal counter tracks acquire/release.
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertEqual(1, sem._value)
        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)
        res = self.loop.run_until_complete(acquire_lock())
        self.assertTrue(res)
        self.assertTrue(sem.locked())
        self.assertEqual(0, sem._value)
        sem.release()
        self.assertFalse(sem.locked())
        self.assertEqual(1, sem._value)
    def test_semaphore_value(self):
        # Negative initial values are rejected.
        self.assertRaises(ValueError, asyncio.Semaphore, -1)
    def test_acquire(self):
        # Three slots: two taken synchronously, c1 gets the third; c2/c3
        # queue as waiters, then two releases wake exactly c2 and c3.
        sem = asyncio.Semaphore(3, loop=self.loop)
        result = []
        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertFalse(sem.locked())
        @asyncio.coroutine
        def c1(result):
            yield from sem.acquire()
            result.append(1)
            return True
        @asyncio.coroutine
        def c2(result):
            yield from sem.acquire()
            result.append(2)
            return True
        @asyncio.coroutine
        def c3(result):
            yield from sem.acquire()
            result.append(3)
            return True
        @asyncio.coroutine
        def c4(result):
            yield from sem.acquire()
            result.append(4)
            return True
        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(sem.locked())
        self.assertEqual(2, len(sem._waiters))
        self.assertEqual(0, sem._value)
        t4 = asyncio.Task(c4(result), loop=self.loop)
        sem.release()
        sem.release()
        self.assertEqual(2, sem._value)
        test_utils.run_briefly(self.loop)
        self.assertEqual(0, sem._value)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(sem.locked())
        self.assertEqual(1, len(sem._waiters))
        self.assertEqual(0, sem._value)
        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
        self.assertFalse(t4.done())
        # cleanup locked semaphore
        sem.release()
        self.loop.run_until_complete(t4)
    def test_acquire_cancel(self):
        # Cancelling a pending acquire() removes it from the waiter queue.
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())
        acquire = asyncio.Task(sem.acquire(), loop=self.loop)
        self.loop.call_soon(acquire.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, acquire)
        self.assertFalse(sem._waiters)
    def test_release_not_acquired(self):
        # BoundedSemaphore refuses releases beyond its initial value.
        sem = asyncio.BoundedSemaphore(loop=self.loop)
        self.assertRaises(ValueError, sem.release)
    def test_release_no_waiters(self):
        # A plain release with no waiters simply unlocks the semaphore.
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(sem.locked())
        sem.release()
        self.assertFalse(sem.locked())
    def test_context_manager(self):
        # Nested context managers: each exit releases one slot, restoring
        # the full value of 2 at the end.
        sem = asyncio.Semaphore(2, loop=self.loop)
        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)
        with self.loop.run_until_complete(acquire_lock()):
            self.assertFalse(sem.locked())
            self.assertEqual(1, sem._value)
            with self.loop.run_until_complete(acquire_lock()):
                self.assertTrue(sem.locked())
        self.assertEqual(2, sem._value)
    def test_context_manager_no_yield(self):
        # Using ``with sem:`` without ``yield from`` is an error and must
        # not consume a slot.
        sem = asyncio.Semaphore(2, loop=self.loop)
        try:
            with sem:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')
        self.assertEqual(2, sem._value)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
dnanexus/rseqc | refs/heads/master | rseqc/lib/bx/align/sitemask/core.py | 7 | """
Base classes for site maskers.
"""
from bx.filter import *
class Masker( Filter ):
    """Abstract base class for alignment site maskers.

    Subclasses must implement ``__call__( self, block )`` and keep the
    ``masked`` / ``total`` counters up to date as they process blocks.
    """
    def __init__( self, **kwargs ):
        # counters: number of sites masked so far / total sites seen
        self.masked = 0
        self.total = 0
        # Bug fix: the original built ``Exception("Abstract class")`` but
        # never raised it, so direct instantiation silently succeeded.
        # Raise only for direct instantiation so that subclass __init__
        # chains (which call this to set up the counters) keep working.
        if type( self ) is Masker:
            raise NotImplementedError( "Masker is an abstract class" )
class MaskPipeline( Pipeline ):
    """
    MaskPipeline implements a Pipeline through which alignments can be
    pushed and masked.  Pipelines can be aggregated.
    """
    def get_masked( self ):
        """Total number of sites masked by all maskers in this pipeline."""
        # Bug fix: the original iterated the nonexistent attribute
        # ``self.pipeline`` (the filter list is ``self.filters``, as used
        # in __call__) while reading the unbound name ``masker`` — a
        # NameError, not the AttributeError it tried to catch.
        masked = 0
        for masker in self.filters:
            try:
                masked += masker.masked
            except AttributeError:
                # filter does not track a masked count; skip it
                pass
        return masked
    masked = property( fget=get_masked )
    def __call__( self, block ):
        """Push ``block`` through every masker in the pipeline."""
        if not block: return
        # push alignment block through all filters
        self.total += len( block.components[0].text )
        for masker in self.filters:
            if not block: return
            try: m_filter = masker.__call__
            except AttributeError:
                raise Exception("Masker in pipeline does not implement \"filter( self, block )\".")
            masker( block )
|
israeleriston/scientific-week | refs/heads/master | backend/venv/lib/python3.5/site-packages/flask/config.py | 76 | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import types
import errno
from werkzeug.utils import import_string
from ._compat import string_types, iteritems
from . import json
class ConfigAttribute(object):
    """Descriptor that forwards attribute access to ``obj.config[name]``.

    An optional ``get_converter`` callable is applied to the value on read.
    """

    def __init__(self, name, get_converter=None):
        self.__name__ = name
        self.get_converter = get_converter

    def __get__(self, obj, type=None):
        # Accessed on the class itself: return the descriptor object.
        if obj is None:
            return self
        value = obj.config[self.__name__]
        if self.get_converter is None:
            return value
        return self.get_converter(value)

    def __set__(self, obj, value):
        # Writes always go straight into the config mapping, unconverted.
        obj.config[self.__name__] = value
class Config(dict):
    """Works exactly like a dict but provides ways to fill it from files
    or special dictionaries.  There are two common patterns to populate the
    config.
    Either you can fill the config from a config file::
        app.config.from_pyfile('yourconfig.cfg')
    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded.  It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::
        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)
    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config.  This makes it possible to use
    lowercase values in the config file for temporary values that are not added
    to the config or to define the config keys in the same file that implements
    the application.
    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::
        app.config.from_envvar('YOURAPPLICATION_SETTINGS')
    In this case before launching the application you have to set this
    environment variable to the file you want to use.  On Linux and OS X
    use the export statement::
        export YOURAPPLICATION_SETTINGS='/path/to/config/file'
    On windows use `set` instead.
    :param root_path: path to which files are read relative from.  When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """
    def __init__(self, root_path, defaults=None):
        dict.__init__(self, defaults or {})
        # Base directory used to resolve relative filenames in
        # from_pyfile() / from_json().
        self.root_path = root_path
    def from_envvar(self, variable_name, silent=False):
        """Loads a configuration from an environment variable pointing to
        a configuration file.  This is basically just a shortcut with nicer
        error messages for this line of code::
            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
        :param variable_name: name of the environment variable
        :param silent: set to ``True`` if you want silent failure for missing
                       files.
        :return: bool. ``True`` if able to load config, ``False`` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError('The environment variable %r is not set '
                               'and as such configuration could not be '
                               'loaded.  Set this variable and make it '
                               'point to a configuration file' %
                               variable_name)
        return self.from_pyfile(rv, silent=silent)
    def from_pyfile(self, filename, silent=False):
        """Updates the values in the config from a Python file.  This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.
        :param filename: the filename of the config.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to ``True`` if you want silent failure for missing
                       files.
        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        # The file is executed as a throwaway module so that only its
        # resulting attributes (uppercase ones) are picked up below.
        d = types.ModuleType('config')
        d.__file__ = filename
        try:
            with open(filename, mode='rb') as config_file:
                exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
        except IOError as e:
            # ENOENT/EISDIR are the "file is missing" cases `silent` covers.
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
        return True
    def from_object(self, obj):
        """Updates the values from the given object.  An object can be of one
        of the following two types:
        -   a string: in this case the object with that name will be imported
        -   an actual object reference: that object is used directly
        Objects are usually either modules or classes. :meth:`from_object`
        loads only the uppercase attributes of the module/class. A ``dict``
        object will not work with :meth:`from_object` because the keys of a
        ``dict`` are not attributes of the ``dict`` class.
        Example of module-based configuration::
            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)
        You should not use this function to load the actual configuration but
        rather configuration defaults.  The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.
        See :ref:`config-dev-prod` for an example of class-based configuration
        using :meth:`from_object`.
        :param obj: an import name or object
        """
        if isinstance(obj, string_types):
            obj = import_string(obj)
        # Only uppercase attributes become config keys (see class docstring).
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)
    def from_json(self, filename, silent=False):
        """Updates the values in the config from a JSON file. This function
        behaves as if the JSON object was a dictionary and passed to the
        :meth:`from_mapping` function.
        :param filename: the filename of the JSON file.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to ``True`` if you want silent failure for missing
                       files.
        .. versionadded:: 0.11
        """
        filename = os.path.join(self.root_path, filename)
        try:
            with open(filename) as json_file:
                obj = json.loads(json_file.read())
        except IOError as e:
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        return self.from_mapping(obj)
    def from_mapping(self, *mapping, **kwargs):
        """Updates the config like :meth:`update` ignoring items with non-upper
        keys.
        .. versionadded:: 0.11
        """
        mappings = []
        if len(mapping) == 1:
            if hasattr(mapping[0], 'items'):
                mappings.append(mapping[0].items())
            else:
                # Assumed to already be an iterable of (key, value) pairs.
                mappings.append(mapping[0])
        elif len(mapping) > 1:
            raise TypeError(
                'expected at most 1 positional argument, got %d' % len(mapping)
            )
        mappings.append(kwargs.items())
        for mapping in mappings:
            for (key, value) in mapping:
                # Non-uppercase keys are silently skipped, mirroring
                # from_object()'s behavior.
                if key.isupper():
                    self[key] = value
        return True
    def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
        """Returns a dictionary containing a subset of configuration options
        that match the specified namespace/prefix. Example usage::
            app.config['IMAGE_STORE_TYPE'] = 'fs'
            app.config['IMAGE_STORE_PATH'] = '/var/app/images'
            app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
            image_store_config = app.config.get_namespace('IMAGE_STORE_')
        The resulting dictionary `image_store_config` would look like::
            {
                'type': 'fs',
                'path': '/var/app/images',
                'base_url': 'http://img.website.com'
            }
        This is often useful when configuration options map directly to
        keyword arguments in functions or class constructors.
        :param namespace: a configuration namespace
        :param lowercase: a flag indicating if the keys of the resulting
                          dictionary should be lowercase
        :param trim_namespace: a flag indicating if the keys of the resulting
                          dictionary should not include the namespace
        .. versionadded:: 0.11
        """
        rv = {}
        for k, v in iteritems(self):
            if not k.startswith(namespace):
                continue
            if trim_namespace:
                key = k[len(namespace):]
            else:
                key = k
            if lowercase:
                key = key.lower()
            rv[key] = v
        return rv
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
|
def newlist(x, y):
    """Return the ratio of the sum of *x* and *y* to *y*."""
    total = x + y
    return total / y
x = 1
TeamExodus/external_chromium_org | refs/heads/EXODUS-5.1 | mojo/public/python/mojo/bindings/__init__.py | 1201 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
|
sam-roth/Keypad | refs/heads/master | keypad/util/_test_rangedict.py | 1 |
import random
import itertools
import pytest
from .rangedict import RangeDict
random.seed(0)  # fixed seed: the generated op sequence is deterministic
minkey = 0      # inclusive lower bound of keys used by random ops
maxkey = 100    # upper bound of keys used by random ops
counter = itertools.count()  # source of unique values for set operations
def random_op():
    """Build one random mutation/query closure over a dict-like object.

    Draws a key range [k1, k2) and a fresh unique value, then returns one
    of four closures (delete/set/get/splice) chosen at random.  Each
    closure works on both a plain list (the reference model) and a
    RangeDict (the device under test).  The chosen parameters are attached
    as ``op.info`` for logging.
    """
    k1 = random.randrange(minkey, maxkey)
    k2 = random.randrange(k1, maxkey + 1)
    v = next(counter)
    def delete(d):
        # Remove all keys in [k1, k2).
        del d[k1:k2]
    def set(d):
        # Lists need per-index assignment; RangeDict accepts slice-assign.
        if isinstance(d, list):
            for i in range(k1, k2):
                d[i] = v
        else:
            d[k1:k2] = v
    def get(d):
        # Normalize "missing key" to None so list/RangeDict results compare.
        try:
            return d[k1]
        except KeyError:
            return None
    def splice(d):
        if hasattr(d, 'splice'):
            d.splice(k1, k2)
        else:
            # Reference-model splice: insert k2 copies of the value just
            # left of k1 (or None at the front) at position k1.
            lo = d[:k1]
            hi = d[k1:]
            v = d[k1-1] if k1 != 0 else None
            d[:] = lo + [v] * k2 + hi
    res = random.choice([delete, set, get, splice])
    res.info = (k1, k2, v)
    return res
def print_op(o):
    """Print an operation's name followed by its (k1, k2, v) parameters."""
    parts = (o.__name__,) + tuple(o.info)
    print(*parts)
@pytest.fixture
def random_ops():
    # 500 operations drawn from the module-wide seeded RNG, so every test
    # run sees the same sequence.
    return [random_op() for _ in range(500)]
def fixup(l):
    """Pad the reference list with ``None`` back up to ``maxkey + 1`` slots.

    Needed after delete/splice ops shrink or grow the reference model.
    """
    missing = maxkey + 1 - len(l)
    if missing > 0:
        l.extend([None] * missing)
def test_ops(random_ops):
    """Apply identical random ops to a plain list and a RangeDict and
    assert they stay equivalent after every step (model-based testing)."""
    ref = [None] * (maxkey + 1)
    dut = RangeDict()
    for o in random_ops:
        print_op(o)
        r = o(dut)
        s = o(ref)
        # Restore the reference model's fixed length after mutations.
        fixup(ref)
        dl = list(dut.values(range(len(ref))))
        print(dl)
        print(ref)
        print(dut)
        # Both the full contents and the op's return value must agree.
        assert dl == ref
        assert r == s
        print('ok')
|
vmarkovtsev/django | refs/heads/master | django/db/migrations/operations/base.py | 356 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
    """
    Base class for migration operations.
    It's responsible for both mutating the in-memory model state
    (see db/migrations/state.py) to represent what it performs, as well
    as actually performing it against a live database.
    Note that some operations won't modify memory state at all (e.g. data
    copying operations), and some will need their modifications to be
    optionally specified by the user (e.g. custom Python code snippets)
    Due to the way this class deals with deconstruction, it should be
    considered immutable.
    """
    # If this migration can be run in reverse.
    # Some operations are impossible to reverse, like deleting data.
    reversible = True
    # Can this migration be represented as SQL? (things like RunPython cannot)
    reduces_to_sql = True
    # Should this operation be forced as atomic even on backends with no
    # DDL transaction support (i.e., does it have no DDL, like RunPython)
    atomic = False
    # NOTE(review): presumably names of constructor arguments the migration
    # writer should expand when serializing — confirm against the writer.
    serialization_expand_args = []
    def __new__(cls, *args, **kwargs):
        # We capture the arguments to make returning them trivial
        # (deconstruct() below simply returns this stored tuple).
        self = object.__new__(cls)
        self._constructor_args = (args, kwargs)
        return self
    def deconstruct(self):
        """
        Returns a 3-tuple of class import path (or just name if it lives
        under django.db.migrations), positional arguments, and keyword
        arguments.
        """
        return (
            self.__class__.__name__,
            self._constructor_args[0],
            self._constructor_args[1],
        )
    def state_forwards(self, app_label, state):
        """
        Takes the state from the previous migration, and mutates it
        so that it matches what this migration would perform.
        """
        raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """
        Performs the mutation on the database schema in the normal
        (forwards) direction.
        """
        raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """
        Performs the mutation on the database schema in the reverse
        direction - e.g. if this were CreateModel, it would in fact
        drop the model's table.
        """
        raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
    def describe(self):
        """
        Outputs a brief summary of what the action does.
        """
        return "%s: %s" % (self.__class__.__name__, self._constructor_args)
    def references_model(self, name, app_label=None):
        """
        Returns True if there is a chance this operation references the given
        model name (as a string), with an optional app label for accuracy.
        Used for optimization. If in doubt, return True;
        returning a false positive will merely make the optimizer a little
        less efficient, while returning a false negative may result in an
        unusable optimized migration.
        """
        return True
    def references_field(self, model_name, name, app_label=None):
        """
        Returns True if there is a chance this operation references the given
        field name, with an optional app label for accuracy.
        Used for optimization. If in doubt, return True.
        """
        return self.references_model(model_name, app_label)
    def allow_migrate_model(self, connection_alias, model):
        """
        Returns if we're allowed to migrate the model.
        This is a thin wrapper around router.allow_migrate_model() that
        preemptively rejects any proxy, swapped out, or unmanaged model.
        """
        if not model._meta.can_migrate(connection_alias):
            return False
        return router.allow_migrate_model(connection_alias, model)
    def __repr__(self):
        # e.g. <AddField 'name', field=...> — args comma-joined, then kwargs.
        return "<%s %s%s>" % (
            self.__class__.__name__,
            ", ".join(map(repr, self._constructor_args[0])),
            ",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
        )
|
TheWylieStCoyote/gnuradio | refs/heads/master | gr-digital/examples/ofdm/benchmark_add_channel.py | 3 | #!/usr/bin/env python
#
# Copyright 2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, channels
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import random, math, sys
class my_top_block(gr.top_block):
    """Flowgraph: file source -> channel model -> phase rotation -> file sink."""
    def __init__(self, ifile, ofile, options):
        gr.top_block.__init__(self)
        SNR = 10.0**(options.snr / 10.0)  # dB -> linear power ratio
        time_offset = options.time_offset
        phase_offset = options.phase_offset*(math.pi / 180.0)  # degrees -> radians
        # calculate noise voltage from SNR
        power_in_signal = abs(options.tx_amplitude)**2
        noise_power = power_in_signal / SNR
        noise_voltage = math.sqrt(noise_power)
        print("Noise voltage: ", noise_voltage)
        # NOTE(review): offset appears to be normalized by the FFT length
        # (i.e. expressed in OFDM-bin units) — confirm against channel_model.
        frequency_offset = options.frequency_offset / options.fft_length
        self.src = blocks.file_source(gr.sizeof_gr_complex, ifile)
        #self.throttle = blocks.throttle(gr.sizeof_gr_complex, options.sample_rate)
        # Random negative noise seed so repeated runs use different noise.
        self.channel = channels.channel_model(noise_voltage, frequency_offset,
                                              time_offset, noise_seed=-random.randint(0,100000))
        # Constant complex multiply implementing the requested phase rotation.
        self.phase = blocks.multiply_const_cc(complex(math.cos(phase_offset),
                                                      math.sin(phase_offset)))
        self.snk = blocks.file_sink(gr.sizeof_gr_complex, ofile)
        self.connect(self.src, self.channel, self.phase, self.snk)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
    """Parse command-line options, build the channel flowgraph and run it.

    Expects two positional arguments: the input sample file and the output
    file.  Exits with status 1 on bad usage.
    """
    # Create Options Parser:
    # Bug fix: usage string previously misspelled the script name as
    # "benchmack_add_channel.py".
    usage = "benchmark_add_channel.py [options] <input file> <output file>"
    parser = OptionParser (usage=usage, option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-n", "--snr", type="eng_float", default=30,
                      help="set the SNR of the channel in dB [default=%default]")
    parser.add_option("", "--seed", action="store_true", default=False,
                      help="use a random seed for AWGN noise [default=%default]")
    parser.add_option("-f", "--frequency-offset", type="eng_float", default=0,
                      help="set frequency offset introduced by channel [default=%default]")
    parser.add_option("-t", "--time-offset", type="eng_float", default=1.0,
                      help="set timing offset between Tx and Rx [default=%default]")
    parser.add_option("-p", "--phase-offset", type="eng_float", default=0,
                      help="set phase offset (in degrees) between Tx and Rx [default=%default]")
    parser.add_option("-m", "--use-multipath", action="store_true", default=False,
                      help="Use a multipath channel [default=%default]")
    parser.add_option("", "--fft-length", type="intx", default=None,
                      help="set the number of FFT bins [default=%default]")
    parser.add_option("", "--tx-amplitude", type="eng_float",
                      default=1.0,
                      help="tell the simulator the signal amplitude [default=%default]")
    (options, args) = parser.parse_args ()
    if len(args) != 2:
        parser.print_help(sys.stderr)
        sys.exit(1)
    # The FFT length is required to normalize the frequency offset.
    if options.fft_length is None:
        sys.stderr.write("Please enter the FFT length of the OFDM signal.\n")
        sys.exit(1)
    ifile = args[0]
    ofile = args[1]
    # build the graph
    tb = my_top_block(ifile, ofile, options)
    r = gr.enable_realtime_scheduling()
    if r != gr.RT_OK:
        print("Warning: Failed to enable realtime scheduling.")
    tb.start()                       # start flow graph
    tb.wait()                        # wait for it to finish
# Script entry point; Ctrl-C exits quietly instead of dumping a traceback.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
|
antoinecarme/pyaf | refs/heads/master | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_WeekOfYear_LSTM.py | 1 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingAverage'] , ['Seasonal_WeekOfYear'] , ['LSTM'] ); |
kenshay/ImageScripter | refs/heads/master | ProgramData/SystemFiles/Python/Lib/shlex.py | 16 | # -*- coding: iso-8859-1 -*-
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
from collections import deque
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Public API of this module.
__all__ = ["shlex", "split"]
class shlex:
    "A lexical analyzer class for simple shell-like syntaxes."
    def __init__(self, instream=None, infile=None, posix=False):
        # A plain string becomes an in-memory stream; otherwise read from
        # the given stream, defaulting to stdin.
        if isinstance(instream, basestring):
            instream = StringIO(instream)
        if instream is not None:
            self.instream = instream
            self.infile = infile
        else:
            self.instream = sys.stdin
            self.infile = None
        self.posix = posix
        # In POSIX mode EOF is None (distinguishable from an empty token).
        if posix:
            self.eof = None
        else:
            self.eof = ''
        self.commenters = '#'
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        if self.posix:
            self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                               'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
        self.whitespace = ' \t\r\n'
        self.whitespace_split = False
        self.quotes = '\'"'
        self.escape = '\\'
        self.escapedquotes = '"'
        # Lexer state: ' ' = between tokens, 'a' = inside a word, a quote
        # char = inside that quote, an escape char = after a backslash,
        # None = past end of file.
        self.state = ' '
        self.pushback = deque()
        self.lineno = 1
        self.debug = 0
        self.token = ''
        # Stack of (infile, instream, lineno) for nested "source" inclusion.
        self.filestack = deque()
        self.source = None
        if self.debug:
            print 'shlex: reading from %s, line %d' \
                  % (self.instream, self.lineno)
    def push_token(self, tok):
        "Push a token onto the stack popped by the get_token method"
        if self.debug >= 1:
            print "shlex: pushing token " + repr(tok)
        self.pushback.appendleft(tok)
    def push_source(self, newstream, newfile=None):
        "Push an input source onto the lexer's input source stack."
        if isinstance(newstream, basestring):
            newstream = StringIO(newstream)
        self.filestack.appendleft((self.infile, self.instream, self.lineno))
        self.infile = newfile
        self.instream = newstream
        self.lineno = 1
        if self.debug:
            if newfile is not None:
                print 'shlex: pushing to file %s' % (self.infile,)
            else:
                print 'shlex: pushing to stream %s' % (self.instream,)
    def pop_source(self):
        "Pop the input source stack."
        self.instream.close()
        (self.infile, self.instream, self.lineno) = self.filestack.popleft()
        if self.debug:
            print 'shlex: popping to %s, line %d' \
                  % (self.instream, self.lineno)
        self.state = ' '
    def get_token(self):
        "Get a token from the input stream (or from stack if it's nonempty)"
        if self.pushback:
            tok = self.pushback.popleft()
            if self.debug >= 1:
                print "shlex: popping token " + repr(tok)
            return tok
        # No pushback.  Get a token.
        raw = self.read_token()
        # Handle inclusions
        if self.source is not None:
            while raw == self.source:
                spec = self.sourcehook(self.read_token())
                if spec:
                    (newfile, newstream) = spec
                    self.push_source(newstream, newfile)
                raw = self.get_token()
        # Maybe we got EOF instead?
        while raw == self.eof:
            if not self.filestack:
                return self.eof
            else:
                # Resume the including stream where it left off.
                self.pop_source()
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw != self.eof:
                print "shlex: token=" + repr(raw)
            else:
                print "shlex: token=EOF"
        return raw
    def read_token(self):
        # Character-at-a-time state machine; see the state legend on
        # ``self.state`` in __init__.
        quoted = False
        escapedstate = ' '
        while True:
            nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state", repr(self.state), \
                      "I see character:", repr(nextchar)
            if self.state is None:
                self.token = ''        # past end of file
                break
            elif self.state == ' ':
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    if not self.posix:
                        self.token = nextchar
                    self.state = nextchar
                elif self.whitespace_split:
                    self.token = nextchar
                    self.state = 'a'
                else:
                    # Punctuation is a one-character token in this mode.
                    self.token = nextchar
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                quoted = True
                if not nextchar:      # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in quotes state"
                    # XXX what error should be raised here?
                    raise ValueError, "No closing quotation"
                if nextchar == self.state:
                    if not self.posix:
                        self.token = self.token + nextchar
                        self.state = ' '
                        break
                    else:
                        # POSIX: closing quote just continues the word.
                        self.state = 'a'
                elif self.posix and nextchar in self.escape and \
                     self.state in self.escapedquotes:
                    escapedstate = self.state
                    self.state = nextchar
                else:
                    self.token = self.token + nextchar
            elif self.state in self.escape:
                if not nextchar:      # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in escape state"
                    # XXX what error should be raised here?
                    raise ValueError, "No escaped character"
                # In posix shells, only the quote itself or the escape
                # character may be escaped within quotes.
                if escapedstate in self.quotes and \
                   nextchar != self.state and nextchar != escapedstate:
                    self.token = self.token + self.state
                self.token = self.token + nextchar
                self.state = escapedstate
            elif self.state == 'a':
                if not nextchar:
                    self.state = None   # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                    if self.posix:
                        self.state = ' '
                        if self.token or (self.posix and quoted):
                            break   # emit current token
                        else:
                            continue
                elif self.posix and nextchar in self.quotes:
                    self.state = nextchar
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif nextchar in self.wordchars or nextchar in self.quotes \
                    or self.whitespace_split:
                    self.token = self.token + nextchar
                else:
                    # Punctuation ends the word; save it for the next call.
                    self.pushback.appendleft(nextchar)
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        # POSIX: an unquoted empty token means EOF (None), not ''.
        if self.posix and not quoted and result == '':
            result = None
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + repr(result)
            else:
                print "shlex: raw token=EOF"
        return result
    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        if newfile[0] == '"':
            newfile = newfile[1:-1]
        # This implements cpp-like semantics for relative-path inclusion.
        if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
            newfile = os.path.join(os.path.dirname(self.infile), newfile)
        return (newfile, open(newfile, "r"))
    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if infile is None:
            infile = self.infile
        if lineno is None:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol: yield tokens until EOF.
        token = self.get_token()
        if token == self.eof:
            raise StopIteration
        return token
def split(s, comments=False, posix=True):
    """Split the string *s* using shell-like syntax and return the tokens."""
    lex = shlex(s, posix=posix)
    lex.whitespace_split = True
    # Unless comment handling was requested, treat '#' as an ordinary char.
    lex.commenters = lex.commenters if comments else ''
    return [tok for tok in lex]
# Manual test driver: lex stdin (no args) or the named file and dump tokens.
if __name__ == '__main__':
    if len(sys.argv) == 1:
        lexer = shlex()
    else:
        file = sys.argv[1]
        lexer = shlex(open(file), file)
    while 1:
        tt = lexer.get_token()
        if tt:
            print "Token: " + repr(tt)
        else:
            break
|
tiagofrepereira2012/tensorflow | refs/heads/master | tensorflow/python/profiler/model_analyzer.py | 9 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.framework import errors
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
# NOTE(review): these look like sentinels meaning "use default options" in
# the Profiler APIs — confirm against the Profiler methods.
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
    'ExpensiveOperationChecker': {},
    'AcceleratorUtilizationChecker': {},
    'JobChecker': {},  # Only available internally.
    'OperationChecker': {},
}
def _build_options(options):
  """Build tfprof.OptionsProto.

  Args:
    options: A dictionary of options.
  Returns:
    tfprof.OptionsProto.
  """
  opts = tfprof_options_pb2.OptionsProto()
  # Scalar fields, paired with the default used when the key is absent.
  scalar_defaults = (
      ('max_depth', 10),
      ('min_bytes', 0),
      ('min_micros', 0),
      ('min_params', 0),
      ('min_float_ops', 0),
      ('min_occurrence', 0),
      ('step', -1),
      ('order_by', 'name'),
      ('account_displayed_op_only', False),
      ('output', 'stdout'),
      ('dump_to_file', ''),
  )
  for field_name, default in scalar_defaults:
    setattr(opts, field_name, options.get(field_name, default))
  # Repeated string fields; absent keys contribute nothing.
  repeated_fields = (
      'account_type_regexes',
      'start_name_regexes',
      'trim_name_regexes',
      'show_name_regexes',
      'hide_name_regexes',
      'select',
  )
  for field_name in repeated_fields:
    getattr(opts, field_name).extend(options.get(field_name, []))
  return opts
def _build_advisor_options(options):
  """Build tfprof.AdvisorOptionsProto.

  Args:
    options: A dictionary of options. See ALL_ADVICE example.
  Returns:
    tfprof.AdvisorOptionsProto.
  """
  opts = tfprof_options_pb2.AdvisorOptionsProto()
  if options is None:
    # No options at all: run with an empty (default) advisor configuration.
    return opts
  for checker_name, checker_options in six.iteritems(options):
    option_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
    for option_key, option_value in six.iteritems(checker_options):
      option_pb[option_key] = option_value
    opts.checkers[checker_name].MergeFrom(option_pb)
  return opts
class Profiler(object):
  """TensorFlow multi-step profiler.

  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  ```python
  Typical use case:
    # Currently we are only allowed to create 1 profiler per process.
    profiler = Profiler(sess.graph)

    for i in xrange(total_steps):
      if i % 10000 == 0:
        run_meta = tf.RunMetadata()
        _ = sess.run(...,
                     options=tf.RunOptions(
                         trace_level=tf.RunOptions.FULL_TRACE),
                     run_metadata=run_meta)
        profiler.add_step(i, run_meta)

        # Profile the parameters of your model.
        profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
            .trainable_variables_parameter()))

        # Or profile the timing of your model operations.
        opts = option_builder.ProfileOptionBuilder.time_and_memory()
        profiler.profile_operations(options=opts)

        # Or you can generate a timeline:
        opts = (option_builder.ProfileOptionBuilder(
                option_builder.ProfileOptionBuilder.time_and_memory())
                .with_step(i)
                .with_timeline_output(filename).build())
        profiler.profile_graph(options=opts)
      else:
        _ = sess.run(...)
    # Auto detect problems and generate advice.
    profiler.advise()
  ```
  """

  def __init__(self, graph, op_log=None):
    """Constructor.

    Args:
      graph: tf.Graph.
      op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
          extra op types.
    """
    self._graph = graph
    # pylint: disable=protected-access
    op_log = tfprof_logger._merge_default_with_oplog(
        self._graph, op_log=op_log)
    # pylint: enable=protected-access
    # Hand the serialized graph and op log to the C++ profiler singleton.
    print_mdl.NewProfiler(
        self._graph.as_graph_def(add_shapes=True).SerializeToString(),
        op_log.SerializeToString())

  def __del__(self):
    # Release the C++ profiler singleton created in __init__.
    print_mdl.DeleteProfiler()

  def add_step(self, step, run_meta):
    """Add statistics of a step.

    Args:
      step: A step uint64 used to identify the RunMetadata. Must be different
          across different AddStep() calls.
      run_meta: RunMetadata proto that contains statistics of a session run.
    """
    # pylint: disable=protected-access
    op_log = tfprof_logger._merge_default_with_oplog(
        self._graph, run_meta=run_meta, add_trace=False,
        add_trainable_var=False)
    # pylint: enable=protected-access
    print_mdl.AddStep(
        step, run_meta.SerializeToString(), op_log.SerializeToString())

  def _profile(self, view, options, tfprof_node):
    """Run one profiler view and parse the result into tfprof_node.

    Shared implementation for the public profile_* methods, which differ
    only in the view name and the result proto type.

    Args:
      view: string view name understood by the C++ profiler
          ('code', 'op', 'scope' or 'graph').
      options: A dict of options. See core/profiler/g3doc/options.md.
      tfprof_node: an empty result proto to parse the profile into.
    Returns:
      tfprof_node, populated with the profiling result.
    """
    opts = _build_options(options)
    tfprof_node.ParseFromString(
        print_mdl.Profile(view.encode('utf-8'), opts.SerializeToString()))
    return tfprof_node

  def profile_python(self, options):
    """Profile the statistics of the Python codes.

      By default, it shows the call stack from root. To avoid
      redundant output, you may use options to filter as below
        options['show_name_regexes'] = ['.*my_code.py.*']

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a MultiGraphNodeProto that records the results.
    """
    return self._profile('code', options,
                         tfprof_output_pb2.MultiGraphNodeProto())

  def profile_operations(self, options):
    """Profile the statistics of the Operation types (e.g. MatMul, Conv2D).

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a MultiGraphNodeProto that records the results.
    """
    return self._profile('op', options,
                         tfprof_output_pb2.MultiGraphNodeProto())

  def profile_name_scope(self, options):
    """Profile the statistics of graph nodes, organized by name scope.

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a GraphNodeProto that records the results.
    """
    return self._profile('scope', options,
                         tfprof_output_pb2.GraphNodeProto())

  def profile_graph(self, options):
    """Profile the statistics of graph nodes, organized by dataflow graph.

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a GraphNodeProto that records the results.
    """
    return self._profile('graph', options,
                         tfprof_output_pb2.GraphNodeProto())

  def advise(self, options):
    """Automatically detect problems and generate reports.

    Args:
      options: A dict of options. See ALL_ADVICE example above.
    Returns:
      An Advise proto that contains the reports from all checkers.
    """
    advise_pb = tfprof_output_pb2.AdviceProto()
    # Advise uses the advisor options builder, not _build_options.
    opts = _build_advisor_options(options)
    advise_pb.ParseFromString(
        print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
    return advise_pb
def profile(graph,
            run_meta=None,
            op_log=None,
            cmd='scope',
            options=_DEFAULT_PROFILE_OPTIONS):
  """Profile model.

    Tutorials and examples can be found in:
    https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: required tf.Graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run time information profiling, such as time and memory.
    op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
        graph nodes with op_log. "types" allow user to flexibly group and
        account profiles using options['accounted_type_regexes'].
    cmd: string. Either 'op', 'scope', 'graph' or 'code'.
        'op' view organizes profile using operation type. (e.g. MatMul)
        'scope' view organizes profile using graph node name scope.
        'graph' view organizes profile using graph node inputs/outputs.
        'code' view organizes profile using Python call stack.
    options: A dict of options. See core/profiler/g3doc/options.md.
  Returns:
    If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
    If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
    Side effect: stdout/file/timeline.json depending on options['output']
  """
  if options == _DEFAULT_PROFILE_OPTIONS:
    options = (option_builder.ProfileOptionBuilder
               .trainable_variables_parameter())

  # pylint: disable=protected-access
  op_log = tfprof_logger._merge_default_with_oplog(
      graph, op_log, run_meta, add_trace=cmd == 'code')
  # pylint: enable=protected-access

  opts = _build_options(options)
  run_meta_str = run_meta.SerializeToString() if run_meta else b''

  # The C++ call is identical for every view; only the type of the result
  # proto differs, so pick it first and share the rest of the code path.
  if cmd == 'code' or cmd == 'op':
    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
  elif cmd == 'graph' or cmd == 'scope':
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
  else:
    raise errors.InvalidArgumentError(
        None, None, 'unknown cmd: %s\n' % cmd)

  tfprof_node.ParseFromString(
      print_mdl.PrintModelAnalysis(
          graph.as_graph_def(add_shapes=True).SerializeToString(),
          run_meta_str,
          op_log.SerializeToString(),
          cmd.encode('utf-8'),
          opts.SerializeToString()))

  return tfprof_node
def advise(graph, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
  """Auto profile and advise.

    Builds profiles and automatically check anomalies of various
    aspects. For more details:
    https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: required tf.Graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        to support run time information profiling, such as time and memory.
    options: see ALL_ADVICE example above. Default checks everything.
  Returns:
    Returns AdviceProto proto
  """
  if options == _DEFAULT_ADVISE_OPTIONS:
    options = ALL_ADVICE.copy()

  # pylint: disable=protected-access
  op_log = tfprof_logger._merge_default_with_oplog(
      graph, None, run_meta, add_trace=True)
  # pylint: enable=protected-access

  opts = _build_advisor_options(options)
  graph_str = graph.as_graph_def(add_shapes=True).SerializeToString()
  run_meta_str = run_meta.SerializeToString() if run_meta else b''

  advice_pb = tfprof_output_pb2.AdviceProto()
  advice_pb.ParseFromString(
      print_mdl.PrintModelAnalysis(
          graph_str,
          run_meta_str,
          op_log.SerializeToString(),
          'advise'.encode('utf-8'),
          opts.SerializeToString()))
  return advice_pb
|
AntouanK/rethinkdb | refs/heads/next | test/rql_test/connections/http_support/decorator/decorator.py | 112 | ########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
if sys.version >= '3':
    # Python 3: the real getfullargspec exists; __init__ is a plain function.
    from inspect import getfullargspec
    def get_init(cls):
        return cls.__init__
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            # Python 2 has no keyword-only arguments; provide empty stand-ins
            # so callers can treat both versions uniformly.
            self.kwonlyargs = []
            self.kwonlydefaults = None
        def __iter__(self):
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults
    def get_init(cls):
        # Python 2: unwrap the unbound method to get the underlying function.
        return cls.__init__.im_func
DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.
    """
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>': # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3': # easy way
                    self.shortsignature = self.signature = \
                        inspect.formatargspec(
                        formatvalue=lambda val: "", *argspec)[1:-1]
                else: # Python 3 way
                    # Build the full signature (with "=None" placeholders for
                    # keyword-only args) and the short call-through signature.
                    allargs = list(self.args)
                    allshortargs = list(self.args)
                    if self.varargs:
                        allargs.append('*' + self.varargs)
                        allshortargs.append('*' + self.varargs)
                    elif self.kwonlyargs:
                        allargs.append('*') # single star syntax
                    for a in self.kwonlyargs:
                        allargs.append('%s=None' % a)
                        allshortargs.append('%s=%s' % (a, a))
                    if self.varkw:
                        allargs.append('**' + self.varkw)
                        allshortargs.append('**' + self.varkw)
                    self.signature = ', '.join(allargs)
                    self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)
    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        # NOTE: func_defaults is the Python 2 spelling of __defaults__.
        func.func_defaults = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        # Attribute the generated function to the caller's module by default.
        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)
    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self) # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1) # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            # _func_ and _call_ are reserved for the decorator machinery.
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'): # add a newline just for safety
            src += '\n' # this is needed in old versions of Python
        try:
            code = compile(src, '<string>', 'single')
            # print >> sys.stderr, 'Compiling %s' % src
            exec code in evaldict
        except:
            print >> sys.stderr, 'Error in generated code:'
            print >> sys.stderr, src
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func
    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an attribute
        __source__ is added to the result. The attributes attrs are added,
        if any.
        """
        if isinstance(obj, str): # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1] # strip the trailing right paren
            func = None
        else: # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
def decorator(caller, func=None):
    """
    decorator(caller) converts a caller function into a decorator;
    decorator(caller, func) decorates a function using a caller.
    """
    if func is not None: # returns a decorated function
        # NOTE: func_globals is the Python 2 spelling of __globals__.
        evaldict = func.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['_func_'] = func
        return FunctionMaker.create(
            func, "return _call_(_func_, %(shortsignature)s)",
            evaldict, undecorated=func, __wrapped__=func)
    else: # returns a decorator
        # The caller may be a class, a function, or a callable object; in
        # each case locate the underlying function and its first real
        # argument name so the generated decorator can forward it.
        if inspect.isclass(caller):
            name = caller.__name__.lower()
            callerfunc = get_init(caller)
            doc = 'decorator(%s) converts functions/generators into ' \
                'factories of %s objects' % (caller.__name__, caller.__name__)
            fun = getfullargspec(callerfunc).args[1] # second arg
        elif inspect.isfunction(caller):
            name = '_lambda_' if caller.__name__ == '<lambda>' \
                else caller.__name__
            callerfunc = caller
            doc = caller.__doc__
            fun = getfullargspec(callerfunc).args[0] # first arg
        else: # assume caller is an object with a __call__ method
            name = caller.__class__.__name__.lower()
            callerfunc = caller.__call__.im_func
            doc = caller.__call__.__doc__
            fun = getfullargspec(callerfunc).args[1] # second arg
        evaldict = callerfunc.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['decorator'] = decorator
        return FunctionMaker.create(
            '%s(%s)' % (name, fun),
            'return decorator(_call_, %s)' % fun,
            evaldict, undecorated=caller, __wrapped__=caller,
            doc=doc, module=caller.__module__)
######################### contextmanager ########################
# The module-level __call__ (and, on old Pythons, __init__) below are not
# meant to be called directly: they are injected as methods into the
# dynamically built ContextManager class via type(...).
def __call__(self, func):
    'Context manager decorator'
    return FunctionMaker.create(
        func, "with _self_: return _func_(%(shortsignature)s)",
        dict(_self_=self, _func_=func), __wrapped__=func)
try: # Python >= 3.2
    from contextlib import _GeneratorContextManager
    ContextManager = type(
        'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError: # Python >= 2.5
    from contextlib import GeneratorContextManager
    def __init__(self, f, *a, **k):
        return GeneratorContextManager.__init__(self, f(*a, **k))
    ContextManager = type(
        'ContextManager', (GeneratorContextManager,),
        dict(__call__=__call__, __init__=__init__))
contextmanager = decorator(ContextManager)
|
disruptek/boto | refs/heads/develop | boto/elastictranscoder/exceptions.py | 184 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class LimitExceededException(JSONResponseError):
    """Elastic Transcoder JSON error: a service limit has been exceeded."""
    pass
class ResourceInUseException(JSONResponseError):
    """Elastic Transcoder JSON error: the resource is currently in use."""
    pass
class AccessDeniedException(JSONResponseError):
    """Elastic Transcoder JSON error: the caller lacks permission."""
    pass
class ResourceNotFoundException(JSONResponseError):
    """Elastic Transcoder JSON error: the requested resource does not exist."""
    pass
class InternalServiceException(JSONResponseError):
    """Elastic Transcoder JSON error: an internal service failure occurred."""
    pass
class ValidationException(JSONResponseError):
    """Elastic Transcoder JSON error: a request parameter failed validation."""
    pass
class IncompatibleVersionException(JSONResponseError):
    """Elastic Transcoder JSON error: incompatible API/resource version."""
    pass
|
lafayette/JBTT | refs/heads/master | framework/python/Lib/encodings/iso2022_jp_2004.py | 816 | #
# iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2004')
class Codec(codecs.Codec):
    """Stateless codec delegating to the C iso2022_jp_2004 implementation."""
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte machinery reads the codec from this class attribute.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # The multibyte machinery reads the codec from this class attribute.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # The multibyte machinery reads the codec from this class attribute.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # The multibyte machinery reads the codec from this class attribute.
    codec = codec
def getregentry():
    """Return the CodecInfo record used to register this codec."""
    return codecs.CodecInfo(
        name='iso2022_jp_2004',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
neavouli/yournextrepresentative | refs/heads/release-neavouli | elections/uk/settings.py | 4 | from __future__ import unicode_literals
# MapIt instance queried by this deployment — presumably for postcode/area
# lookups; verify against callers before changing.
MAPIT_BASE_URL = 'http://mapit.democracyclub.org.uk/'
# Attribution strings for the site owner and copyright holder.
SITE_OWNER = 'Democracy Club'
COPYRIGHT_HOLDER = 'Democracy Club Limited'
|
dafx/aubio | refs/heads/master | python/demos/demo_source.py | 5 | #! /usr/bin/env python
import sys
from aubio import source
if __name__ == '__main__':
    # Usage: demo_source.py <inputfile> [samplerate] [hop_size]
    if len(sys.argv) < 2:
        print('usage: %s <inputfile> [samplerate] [hop_size]' % sys.argv[0])
        sys.exit(1)
    samplerate = 0  # 0 = use the file's own rate (read back from f below)
    hop_size = 256
    if len(sys.argv) > 2: samplerate = int(sys.argv[2])
    if len(sys.argv) > 3: hop_size = int(sys.argv[3])
    f = source(sys.argv[1], samplerate, hop_size)
    samplerate = f.samplerate
    total_frames, read = 0, f.hop_size
    # Pull hop_size frames per call until a short read signals end of file.
    while read:
        vec, read = f()
        total_frames += read
        if read < f.hop_size: break
    outstr = "read %.2fs" % (total_frames / float(samplerate))
    outstr += " (%d frames in" % total_frames
    outstr += " %d blocks" % (total_frames // f.hop_size)
    outstr += " at %dHz)" % f.samplerate
    outstr += " from " + f.uri
    print(outstr)
|
bratsche/Neutron-Drive | refs/heads/master | google_appengine/lib/antlr3/antlr3/tree.py | 78 | """ @package antlr3.tree
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE
from antlr3.recognizers import BaseRecognizer, RuleReturnScope
from antlr3.streams import IntStream
from antlr3.tokens import CommonToken, Token, INVALID_TOKEN
from antlr3.exceptions import MismatchedTreeNodeException, \
MissingTokenException, UnwantedTokenException, MismatchedTokenException, \
NoViableAltException
############################################################################
#
# tree related exceptions
#
############################################################################
class RewriteCardinalityException(RuntimeError):
    """
    @brief Base class for all exceptions thrown during AST rewrite construction.

    This signifies a case where the cardinality of two or more elements
    in a subrule are different: (ID INT)+ where |ID|!=|INT|
    """

    def __init__(self, elementDescription):
        # Forward to RuntimeError so str()/args behave normally, and keep
        # the description available via getMessage().
        super(RewriteCardinalityException, self).__init__(elementDescription)
        self.elementDescription = elementDescription

    def getMessage(self):
        """Return the description of the offending rewrite element."""
        return self.elementDescription
class RewriteEarlyExitException(RewriteCardinalityException):
    """@brief No elements within a (...)+ in a rewrite rule"""

    def __init__(self, elementDescription=None):
        # Unlike the base class, the description is optional here.
        RewriteCardinalityException.__init__(self, elementDescription)
class RewriteEmptyStreamException(RewriteCardinalityException):
    """
    @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream
    """

    # No extra state: the base class carries the element description.
    pass
############################################################################
#
# basic Tree and TreeAdaptor interfaces
#
############################################################################
class Tree(object):
    """
    @brief Abstract baseclass for tree nodes.

    What does a tree look like? ANTLR has a number of support classes
    such as CommonTreeNodeStream that work on these kinds of trees. You
    don't have to make your trees implement this interface, but if you do,
    you'll be able to use more support code.

    NOTE: When constructing trees, ANTLR can build any kind of tree; it can
    even use Token objects as trees if you add a child list to your tokens.

    This is a tree node without any payload; just navigation and factory stuff.
    """

    def getChild(self, i):
        """Return the child node at index i (0..n-1)."""
        raise NotImplementedError

    def getChildCount(self):
        """Return the number of children of this node."""
        raise NotImplementedError

    def getParent(self):
        """Tree tracks parent and child index now > 3.0"""
        raise NotImplementedError

    def setParent(self, t):
        """Tree tracks parent and child index now > 3.0"""
        raise NotImplementedError

    def getChildIndex(self):
        """This node is what child index? 0..n-1"""
        raise NotImplementedError

    def setChildIndex(self, index):
        """This node is what child index? 0..n-1"""
        raise NotImplementedError

    def freshenParentAndChildIndexes(self):
        """Set the parent and child index values for all children"""
        raise NotImplementedError

    def addChild(self, t):
        """
        Add t as a child to this node. If t is null, do nothing. If t
        is nil, add all children of t to this' children.
        """
        raise NotImplementedError

    def setChild(self, i, t):
        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
        raise NotImplementedError

    def deleteChild(self, i):
        """Delete the child at index i (0..n-1)."""
        raise NotImplementedError

    def replaceChildren(self, startChildIndex, stopChildIndex, t):
        """
        Delete children from start to stop and replace with t even if t is
        a list (nil-root tree). num of children can increase or decrease.
        For huge child lists, inserting children can force walking rest of
        children to set their childindex; could be slow.
        """
        raise NotImplementedError

    def isNil(self):
        """
        Indicates the node is a nil node but may still have children, meaning
        the tree is a flat list.
        """
        raise NotImplementedError

    def getTokenStartIndex(self):
        """
        What is the smallest token index (indexing from 0) for this node
        and its children?
        """
        raise NotImplementedError

    def setTokenStartIndex(self, index):
        """Set the smallest token index for this node and its children."""
        raise NotImplementedError

    def getTokenStopIndex(self):
        """
        What is the largest token index (indexing from 0) for this node
        and its children?
        """
        raise NotImplementedError

    def setTokenStopIndex(self, index):
        """Set the largest token index for this node and its children."""
        raise NotImplementedError

    def dupNode(self):
        """Duplicate this single node (not the whole subtree)."""
        raise NotImplementedError

    def getType(self):
        """Return a token type; needed for tree parsing."""
        raise NotImplementedError

    def getText(self):
        """Return the text payload of this node."""
        raise NotImplementedError

    def getLine(self):
        """
        In case we don't have a token payload, what is the line for errors?
        """
        raise NotImplementedError

    def getCharPositionInLine(self):
        """Character position within the line, for error reporting."""
        raise NotImplementedError

    def toStringTree(self):
        """Return a textual representation of this node and its children."""
        raise NotImplementedError

    def toString(self):
        """Return a textual representation of this single node."""
        raise NotImplementedError
class TreeAdaptor(object):
"""
@brief Abstract baseclass for tree adaptors.
How to create and navigate trees. Rather than have a separate factory
and adaptor, I've merged them. Makes sense to encapsulate.
This takes the place of the tree construction code generated in the
generated code in 2.x and the ASTFactory.
I do not need to know the type of a tree at all so they are all
generic Objects. This may increase the amount of typecasting needed. :(
"""
# C o n s t r u c t i o n
def createWithPayload(self, payload):
"""
Create a tree node from Token object; for CommonTree type trees,
then the token just becomes the payload. This is the most
common create call.
Override if you want another kind of node to be built.
"""
raise NotImplementedError
def dupNode(self, treeNode):
"""Duplicate a single tree node.
Override if you want another kind of node to be built."""
raise NotImplementedError
def dupTree(self, tree):
"""Duplicate tree recursively, using dupNode() for each node"""
raise NotImplementedError
def nil(self):
"""
Return a nil node (an empty but non-null node) that can hold
a list of element as the children. If you want a flat tree (a list)
use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
"""
raise NotImplementedError
def errorNode(self, input, start, stop, exc):
"""
Return a tree node representing an error. This node records the
tokens consumed during error recovery. The start token indicates the
input symbol at which the error was detected. The stop token indicates
the last symbol consumed during recovery.
You must specify the input stream so that the erroneous text can
be packaged up in the error node. The exception could be useful
to some applications; default implementation stores ptr to it in
the CommonErrorNode.
This only makes sense during token parsing, not tree parsing.
Tree parsing should happen only when parsing and tree construction
succeed.
"""
raise NotImplementedError
def isNil(self, tree):
"""Is tree considered a nil node used to make lists of child nodes?"""
raise NotImplementedError
def addChild(self, t, child):
"""
Add a child to the tree t. If child is a flat tree (a list), make all
in list children of t. Warning: if t has no children, but child does
and child isNil then you can decide it is ok to move children to t via
t.children = child.children; i.e., without copying the array. Just
make sure that this is consistent with have the user will build
ASTs. Do nothing if t or child is null.
"""
raise NotImplementedError
def becomeRoot(self, newRoot, oldRoot):
"""
If oldRoot is a nil root, just copy or move the children to newRoot.
If not a nil root, make oldRoot a child of newRoot.
old=^(nil a b c), new=r yields ^(r a b c)
old=^(a b c), new=r yields ^(r ^(a b c))
If newRoot is a nil-rooted single child tree, use the single
child as the new root node.
old=^(nil a b c), new=^(nil r) yields ^(r a b c)
old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
If oldRoot was null, it's ok, just return newRoot (even if isNil).
old=null, new=r yields r
old=null, new=^(nil r) yields ^(nil r)
Return newRoot. Throw an exception if newRoot is not a
simple node or nil root with a single child node--it must be a root
node. If newRoot is ^(nil x) return x as newRoot.
Be advised that it's ok for newRoot to point at oldRoot's
children; i.e., you don't have to copy the list. We are
constructing these nodes so we should have this control for
efficiency.
"""
raise NotImplementedError
def rulePostProcessing(self, root):
"""
Given the root of the subtree created for this rule, post process
it to do any simplifications or whatever you want. A required
behavior is to convert ^(nil singleSubtree) to singleSubtree
as the setting of start/stop indexes relies on a single non-nil root
for non-flat trees.
Flat trees such as for lists like "idlist : ID+ ;" are left alone
unless there is only one ID. For a list, the start/stop indexes
are set in the nil node.
This method is executed after all rule tree construction and right
before setTokenBoundaries().
"""
raise NotImplementedError
def getUniqueID(self, node):
    """For identifying trees.

    How to identify nodes so we can say "add node to a prior node"?
    Even becomeRoot is an issue.  Use System.identityHashCode(node)
    usually.
    """

    raise NotImplementedError
# R e w r i t e R u l e s
def createFromToken(self, tokenType, fromToken, text=None):
    """
    Create a new node derived from a token, with a new token type and
    (optionally) new text.

    This is invoked from an imaginary node ref on right side of a
    rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"].

    This should invoke createToken(Token).
    """

    raise NotImplementedError
def createFromType(self, tokenType, text):
    """Create a new node derived from a token, with a new token type.

    This is invoked from an imaginary node ref on right side of a
    rewrite rule as IMAG["IMAG"].

    This should invoke createToken(int,String).
    """

    raise NotImplementedError
# C o n t e n t
def getType(self, t):
    """For tree parsing, I need to know the token type of a node."""

    raise NotImplementedError
def setType(self, t, type):
    """Node constructors can set the type of a node."""

    raise NotImplementedError
def getText(self, t):
    """Return the text of node t."""

    raise NotImplementedError
def setText(self, t, text):
    """Node constructors can set the text of a node."""

    raise NotImplementedError
def getToken(self, t):
    """Return the token object from which this node was created.

    Currently used only for printing an error message.  The error
    display routine in BaseRecognizer needs to display where in the
    input the error occurred.  If your tree implementation does not
    store information that can lead you to the token, you can create a
    token filled with the appropriate information and pass that back.
    See BaseRecognizer.getErrorMessage().
    """

    raise NotImplementedError
def setTokenBoundaries(self, t, startToken, stopToken):
    """
    Where are the bounds in the input token stream for this node and
    all children?  Each rule that creates AST nodes will call this
    method right before returning.  Flat trees (i.e., lists) will
    still usually have a nil root node just to hold the children list.
    That node would contain the start/stop indexes then.
    """

    raise NotImplementedError
def getTokenStartIndex(self, t):
    """
    Get the token start index for this subtree; return -1 if no such index.
    """

    raise NotImplementedError
def getTokenStopIndex(self, t):
    """
    Get the token stop index for this subtree; return -1 if no such index.
    """

    raise NotImplementedError
# N a v i g a t i o n / T r e e P a r s i n g
def getChild(self, t, i):
    """Get a child 0..n-1 node."""

    raise NotImplementedError
def setChild(self, t, i, child):
    """Set ith child (0..n-1) of t to child; child must be a non-null,
    non-nil node."""

    raise NotImplementedError
def deleteChild(self, t, i):
    """Remove ith child and shift children down from right."""

    raise NotImplementedError
def getChildCount(self, t):
    """How many children?  If 0, then this is a leaf node."""

    raise NotImplementedError
def getParent(self, t):
    """
    Who is the parent node of this node; if null, implies node is root.

    If your node type doesn't handle this, it's ok but the tree rewrites
    in tree parsers need this functionality.
    """

    raise NotImplementedError
def setParent(self, t, parent):
    """
    Set the parent node of this node; if null, implies node is root.

    If your node type doesn't handle this, it's ok but the tree rewrites
    in tree parsers need this functionality.
    """

    raise NotImplementedError
def getChildIndex(self, t):
    """
    What index is this node in the child list?  Range: 0..n-1

    If your node type doesn't handle this, it's ok but the tree rewrites
    in tree parsers need this functionality.
    """

    raise NotImplementedError
def setChildIndex(self, t, index):
    """
    Set the index of this node in the child list.  Range: 0..n-1

    If your node type doesn't handle this, it's ok but the tree rewrites
    in tree parsers need this functionality.
    """

    raise NotImplementedError
def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
    """
    Replace from start to stop child index of parent with t, which might
    be a list.  Number of children may be different after this call.

    If parent is null, don't do anything; must be at root of overall tree.
    Can't replace whatever points to the parent externally.  Do nothing.
    """

    raise NotImplementedError
# Misc
def create(self, *args):
    """
    Deprecated, use createWithPayload, createFromToken or createFromType.

    This method only exists to mimic the Java interface of TreeAdaptor;
    it inspects the argument types and dispatches to the matching
    factory method.  Raises TypeError when no overload matches.
    """

    nargs = len(args)

    if nargs == 1 and isinstance(args[0], Token):
        # Object create(Token payload);
        return self.createWithPayload(args[0])

    if nargs >= 2 and isinstance(args[0], (int, long)):
        if isinstance(args[1], Token):
            if nargs == 2:
                # Object create(int tokenType, Token fromToken);
                return self.createFromToken(args[0], args[1])

            if nargs == 3 and isinstance(args[2], basestring):
                # Object create(int tokenType, Token fromToken, String text);
                return self.createFromToken(args[0], args[1], args[2])

        elif nargs == 2 and isinstance(args[1], basestring):
            # Object create(int tokenType, String text);
            return self.createFromType(args[0], args[1])

    raise TypeError(
        "No create method with this signature found: %s"
        % (', '.join(type(v).__name__ for v in args))
        )
############################################################################
#
# base implementation of Tree and TreeAdaptor
#
# Tree
# \- BaseTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
#
############################################################################
class BaseTree(Tree):
    """
    @brief A generic tree implementation with no payload.

    You must subclass to actually have any user data.  ANTLR v3 uses a
    list of children approach instead of the child-sibling approach in
    v2.  A flat tree (a list) is an empty node whose children represent
    the list.  An empty, but non-null node is called "nil".
    """

    # BaseTree is abstract, no need to complain about not implemented abstract
    # methods
    # pylint: disable-msg=W0223

    def __init__(self, node=None):
        """
        Create a new node from an existing node does nothing for BaseTree
        as there are no fields other than the children list, which cannot
        be copied as the children are not considered part of this node.
        """

        Tree.__init__(self)
        self.children = []
        self.parent = None
        self.childIndex = 0


    def getChild(self, i):
        """Return the child at index i, or None when out of range."""

        try:
            return self.children[i]
        except IndexError:
            return None


    def getChildren(self):
        """@brief Get the children internal List

        Note that if you directly mess with
        the list, do so at your own risk.
        """

        # FIXME: mark as deprecated
        return self.children


    def getFirstChildWithType(self, treeType):
        """Return the first child whose token type is treeType, else None."""

        for child in self.children:
            if child.getType() == treeType:
                return child

        return None


    def getChildCount(self):
        return len(self.children)


    def addChild(self, childTree):
        """Add childTree as child of this node.

        Warning: if childTree is nil (a flat list), its children are
        spliced in directly (the list object is shared, not copied).
        Does nothing when childTree is None.
        """

        # this implementation is much simpler and probably less efficient
        # than the mumbo-jumbo that Ter did for the Java runtime.

        if childTree is None:
            return

        if childTree.isNil():
            # childTree is an empty node possibly with children

            if self.children is childTree.children:
                raise ValueError("attempt to add child list to itself")

            # fix parent pointer and childIndex for new children
            for idx, child in enumerate(childTree.children):
                child.parent = self
                child.childIndex = len(self.children) + idx

            self.children += childTree.children

        else:
            # child is not nil (don't care about children)
            self.children.append(childTree)
            childTree.parent = self
            childTree.childIndex = len(self.children) - 1


    def addChildren(self, children):
        """Add all elements of the children list as children of this node.

        Routed through addChild() so parent pointers and childIndex are
        maintained for every element (the old ``self.children += children``
        left them stale) and nil elements are spliced consistently with a
        direct addChild() call.  This matches the Java runtime's
        BaseTree.addChildren().
        """

        for child in children:
            self.addChild(child)


    def setChild(self, i, t):
        """Replace the ith child with t; ignores None, rejects nil trees."""

        if t is None:
            return

        if t.isNil():
            raise ValueError("Can't set single child to a list")

        self.children[i] = t
        t.parent = self
        t.childIndex = i


    def deleteChild(self, i):
        """Remove and return the ith child; reindex the children after it."""

        killed = self.children[i]

        del self.children[i]

        # walk rest and decrement their child indexes
        for idx, child in enumerate(self.children[i:]):
            child.childIndex = i + idx

        return killed


    def replaceChildren(self, startChildIndex, stopChildIndex, newTree):
        """
        Delete children from start to stop and replace with t even if t is
        a list (nil-root tree).  num of children can increase or decrease.
        For huge child lists, inserting children can force walking rest of
        children to set their childindex; could be slow.
        """

        if (startChildIndex >= len(self.children)
            or stopChildIndex >= len(self.children)
            ):
            raise IndexError("indexes invalid")

        replacingHowMany = stopChildIndex - startChildIndex + 1

        # normalize to a list of children to add: newChildren
        if newTree.isNil():
            newChildren = newTree.children

        else:
            newChildren = [newTree]

        replacingWithHowMany = len(newChildren)
        delta = replacingHowMany - replacingWithHowMany


        if delta == 0:
            # if same number of nodes, do direct replace
            for idx, child in enumerate(newChildren):
                self.children[idx + startChildIndex] = child
                child.parent = self
                child.childIndex = idx + startChildIndex

        else:
            # length of children changes...

            # ...delete replaced segment...
            del self.children[startChildIndex:stopChildIndex+1]

            # ...insert new segment...
            self.children[startChildIndex:startChildIndex] = newChildren

            # ...and fix indeces
            self.freshenParentAndChildIndexes(startChildIndex)


    def isNil(self):
        return False


    def freshenParentAndChildIndexes(self, offset=0):
        """Reset parent pointer and childIndex for children from offset on."""

        for idx, child in enumerate(self.children[offset:]):
            child.childIndex = idx + offset
            child.parent = self


    def sanityCheckParentAndChildIndexes(self, parent=None, i=-1):
        """Recursively verify parent/childIndex bookkeeping; raise on error."""

        if parent != self.parent:
            raise ValueError(
                "parents don't match; expected %r found %r"
                % (parent, self.parent)
                )

        if i != self.childIndex:
            raise ValueError(
                "child indexes don't match; expected %d found %d"
                % (i, self.childIndex)
                )

        for idx, child in enumerate(self.children):
            child.sanityCheckParentAndChildIndexes(self, idx)


    def getChildIndex(self):
        """BaseTree doesn't track child indexes."""

        return 0


    def setChildIndex(self, index):
        """BaseTree doesn't track child indexes."""

        pass


    def getParent(self):
        """BaseTree doesn't track parent pointers."""

        return None

    def setParent(self, t):
        """BaseTree doesn't track parent pointers."""

        pass


    def toStringTree(self):
        """Print out a whole tree not just a node"""

        if len(self.children) == 0:
            return self.toString()

        buf = []
        if not self.isNil():
            buf.append('(')
            buf.append(self.toString())
            buf.append(' ')

        for i, child in enumerate(self.children):
            if i > 0:
                buf.append(' ')
            buf.append(child.toStringTree())

        if not self.isNil():
            buf.append(')')

        return ''.join(buf)


    def getLine(self):
        return 0


    def getCharPositionInLine(self):
        return 0


    def toString(self):
        """Override to say how a node (not a tree) should look as text"""

        raise NotImplementedError
class BaseTreeAdaptor(TreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.
    """

    # BaseTreeAdaptor is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def nil(self):
        # A "nil" node is a payload-less node used as the root of flat lists.
        return self.createWithPayload(None)


    def errorNode(self, input, start, stop, exc):
        """
        create tree node that holds the start and stop tokens associated
        with an error.

        If you specify your own kind of tree nodes, you will likely have to
        override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
        if no token payload but you might have to set token type for diff
        node type.
        """

        return CommonErrorNode(input, start, stop, exc)


    def isNil(self, tree):
        return tree.isNil()


    def dupTree(self, t, parent=None):
        """
        This is generic in the sense that it will work with any kind of
        tree (not just Tree interface).  It invokes the adaptor routines
        not the tree node routines to do the construction.
        """

        if t is None:
            return None

        newTree = self.dupNode(t)

        # ensure new subtree root has parent/child index set
        # same index in new tree
        self.setChildIndex(newTree, self.getChildIndex(t))

        self.setParent(newTree, parent)
        for i in range(self.getChildCount(t)):
            child = self.getChild(t, i)
            newSubTree = self.dupTree(child, t)
            self.addChild(newTree, newSubTree)

        return newTree


    def addChild(self, tree, child):
        """
        Add a child to the tree t.  If child is a flat tree (a list), make
        all in list children of t.  Warning: if t has no children, but
        child does and child isNil then you can decide it is ok to move
        children to t via t.children = child.children; i.e., without
        copying the array.  Just make sure that this is consistent with
        how the user will build ASTs.
        """

        #if isinstance(child, Token):
        #    child = self.createWithPayload(child)

        if tree is not None and child is not None:
            tree.addChild(child)


    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to newRoot.
        If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot.  Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be a root
        node.  If newRoot is ^(nil x) return x as newRoot.

        Be advised that it's ok for newRoot to point at oldRoot's
        children; i.e., you don't have to copy the list.  We are
        constructing these nodes so we should have this control for
        efficiency.
        """

        # a bare Token on the right side of a rewrite becomes a new node
        if isinstance(newRoot, Token):
            newRoot = self.create(newRoot)

        if oldRoot is None:
            return newRoot

        if not isinstance(newRoot, CommonTree):
            newRoot = self.createWithPayload(newRoot)

        # handle ^(nil real-node)
        if newRoot.isNil():
            nc = newRoot.getChildCount()
            if nc == 1:
                newRoot = newRoot.getChild(0)

            elif nc > 1:
                # TODO: make tree run time exceptions hierarchy
                raise RuntimeError("more than one node as root")

        # add oldRoot to newRoot; addChild takes care of case where oldRoot
        # is a flat list (i.e., nil-rooted tree).  All children of oldRoot
        # are added to newRoot.
        newRoot.addChild(oldRoot)
        return newRoot


    def rulePostProcessing(self, root):
        """Transform ^(nil x) to x and nil to null"""

        if root is not None and root.isNil():
            if root.getChildCount() == 0:
                root = None

            elif root.getChildCount() == 1:
                root = root.getChild(0)
                # whoever invokes rule will set parent and child index
                root.setParent(None)
                root.setChildIndex(-1)

        return root


    def createFromToken(self, tokenType, fromToken, text=None):
        # clone the payload token, then retype (and optionally retext) it
        assert isinstance(tokenType, (int, long)), type(tokenType).__name__
        assert isinstance(fromToken, Token), type(fromToken).__name__
        assert text is None or isinstance(text, basestring), type(text).__name__

        fromToken = self.createToken(fromToken)
        fromToken.type = tokenType
        if text is not None:
            fromToken.text = text
        t = self.createWithPayload(fromToken)
        return t


    def createFromType(self, tokenType, text):
        # build an imaginary token, then wrap it in a node
        assert isinstance(tokenType, (int, long)), type(tokenType).__name__
        assert isinstance(text, basestring), type(text).__name__

        fromToken = self.createToken(tokenType=tokenType, text=text)
        t = self.createWithPayload(fromToken)
        return t


    def getType(self, t):
        return t.getType()


    def setType(self, t, type):
        raise RuntimeError("don't know enough about Tree node")


    def getText(self, t):
        return t.getText()


    def setText(self, t, text):
        raise RuntimeError("don't know enough about Tree node")


    def getChild(self, t, i):
        return t.getChild(i)


    def setChild(self, t, i, child):
        t.setChild(i, child)


    def deleteChild(self, t, i):
        return t.deleteChild(i)


    def getChildCount(self, t):
        return t.getChildCount()


    def getUniqueID(self, node):
        return hash(node)


    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """

        raise NotImplementedError
############################################################################
#
# common tree implementation
#
# Tree
# \- BaseTree
# \- CommonTree
# \- CommonErrorNode
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class CommonTree(BaseTree):
    """@brief A tree node that is wrapper for a Token object.

    After 3.0 release while building tree rewrite stuff, it became clear
    that computing parent and child index is very difficult and
    cumbersome.  Better to spend the space in every tree node.  If you
    don't want these extra fields, it's easy to cut them out in your own
    BaseTree subclass.
    """

    def __init__(self, payload):
        """payload may be None (nil node), a Token, or another CommonTree
        (acts as a copy constructor for the token and token bounds).
        """

        BaseTree.__init__(self)

        # What token indexes bracket all tokens associated with this node
        # and below?
        self.startIndex = -1
        self.stopIndex = -1

        # Who is the parent node of this node; if null, implies node is root
        self.parent = None

        # What index is this node in the child list? Range: 0..n-1
        self.childIndex = -1

        # A single token is the payload
        if payload is None:
            self.token = None

        elif isinstance(payload, CommonTree):
            # copy construction: share the token, inherit the token bounds
            self.token = payload.token
            self.startIndex = payload.startIndex
            self.stopIndex = payload.stopIndex

        elif isinstance(payload, Token):
            # (the redundant "payload is None" test that used to be here
            # was dead code: None is handled by the first branch above)
            self.token = payload

        else:
            raise TypeError(type(payload).__name__)


    def getToken(self):
        return self.token


    def dupNode(self):
        return CommonTree(self)


    def isNil(self):
        return self.token is None


    def getType(self):
        if self.token is None:
            return INVALID_TOKEN_TYPE

        return self.token.getType()

    type = property(getType)


    def getText(self):
        if self.token is None:
            return None

        return self.token.text

    text = property(getText)


    def getLine(self):
        # fall back to the first child when this node has no line info
        if self.token is None or self.token.getLine() == 0:
            if self.getChildCount():
                return self.getChild(0).getLine()
            else:
                return 0

        return self.token.getLine()

    line = property(getLine)


    def getCharPositionInLine(self):
        # fall back to the first child when this node has no column info
        if self.token is None or self.token.getCharPositionInLine() == -1:
            if self.getChildCount():
                return self.getChild(0).getCharPositionInLine()
            else:
                return 0

        else:
            return self.token.getCharPositionInLine()

    charPositionInLine = property(getCharPositionInLine)


    def getTokenStartIndex(self):
        if self.startIndex == -1 and self.token is not None:
            return self.token.getTokenIndex()

        return self.startIndex

    def setTokenStartIndex(self, index):
        self.startIndex = index

    tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex)


    def getTokenStopIndex(self):
        if self.stopIndex == -1 and self.token is not None:
            return self.token.getTokenIndex()

        return self.stopIndex

    def setTokenStopIndex(self, index):
        self.stopIndex = index

    tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex)


    def getChildIndex(self):
        #FIXME: mark as deprecated
        return self.childIndex


    def setChildIndex(self, idx):
        #FIXME: mark as deprecated
        self.childIndex = idx


    def getParent(self):
        #FIXME: mark as deprecated
        return self.parent


    def setParent(self, t):
        #FIXME: mark as deprecated
        self.parent = t


    def toString(self):
        if self.isNil():
            return "nil"

        if self.getType() == INVALID_TOKEN_TYPE:
            return "<errornode>"

        return self.token.text

    __str__ = toString



    def toStringTree(self):
        if not self.children:
            return self.toString()

        ret = ''
        if not self.isNil():
            ret += '(%s ' % (self.toString())

        ret += ' '.join([child.toStringTree() for child in self.children])

        if not self.isNil():
            ret += ')'

        return ret
# Shared sentinel node wrapping the module's INVALID_TOKEN payload.
INVALID_NODE = CommonTree(INVALID_TOKEN)
class CommonErrorNode(CommonTree):
    """A node representing erroneous token range in token stream"""

    def __init__(self, input, start, stop, exc):
        # payload None: an error node carries a token *range*, not a token
        CommonTree.__init__(self, None)

        if (stop is None or
            (stop.getTokenIndex() < start.getTokenIndex() and
             stop.getType() != EOF
             )
            ):
            # sometimes resync does not consume a token (when LT(1) is
            # in follow set.  So, stop will be 1 to left to start. adjust.
            # Also handle case where start is the first token and no token
            # is consumed during recovery; LT(-1) will return null.
            stop = start

        self.input = input
        self.start = start
        self.stop = stop
        self.trappedException = exc


    def isNil(self):
        # never nil, even though the token payload is None
        return False


    def getType(self):
        return INVALID_TOKEN_TYPE


    def getText(self):
        """Return the raw input text spanned by the bad token/node range."""

        if isinstance(self.start, Token):
            i = self.start.getTokenIndex()
            j = self.stop.getTokenIndex()
            if self.stop.getType() == EOF:
                j = self.input.size()

            badText = self.input.toString(i, j)

        elif isinstance(self.start, Tree):
            badText = self.input.toString(self.start, self.stop)

        else:
            # people should subclass if they alter the tree type so this
            # next one is for sure correct.
            badText = "<unknown>"

        return badText


    def toString(self):
        # render a short description keyed on the kind of trapped exception
        if isinstance(self.trappedException, MissingTokenException):
            return ("<missing type: "
                    + str(self.trappedException.getMissingType())
                    + ">")

        elif isinstance(self.trappedException, UnwantedTokenException):
            return ("<extraneous: "
                    + str(self.trappedException.getUnexpectedToken())
                    + ", resync=" + self.getText() + ">")

        elif isinstance(self.trappedException, MismatchedTokenException):
            return ("<mismatched token: "
                    + str(self.trappedException.token)
                    + ", resync=" + self.getText() + ">")

        elif isinstance(self.trappedException, NoViableAltException):
            return ("<unexpected: "
                    + str(self.trappedException.token)
                    + ", resync=" + self.getText() + ">")

        return "<error: "+self.getText()+">"
class CommonTreeAdaptor(BaseTreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.

    It provides really just factory methods; all the work is done by
    BaseTreeAdaptor.  If you would like to have different tokens created
    than ClassicToken objects, you need to override this and then set
    the parser tree adaptor to use your subclass.

    To get your parser to build nodes of a different type, override
    create(Token).
    """

    def dupNode(self, treeNode):
        """
        Duplicate a node.  This is part of the factory; override if you
        want another kind of node to be built.

        I could use reflection to prevent having to override this
        but reflection is slow.
        """

        if treeNode is not None:
            return treeNode.dupNode()
        return None


    def createWithPayload(self, payload):
        # factory hook: all node construction funnels through here
        return CommonTree(payload)


    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """

        if fromToken is None:
            return CommonToken(type=tokenType, text=text)
        return CommonToken(oldToken=fromToken)


    def setTokenBoundaries(self, t, startToken, stopToken):
        """
        Track start/stop token for subtree root created for a rule.
        Only works with Tree nodes.  For rules that match nothing,
        seems like this will yield start=i and stop=i-1 in a nil node.
        Might be useful info so I'll not force to be i..i.
        """

        if t is None:
            return

        # missing boundary tokens default to index 0
        start = 0
        if startToken is not None:
            start = startToken.index

        stop = 0
        if stopToken is not None:
            stop = stopToken.index

        t.setTokenStartIndex(start)
        t.setTokenStopIndex(stop)


    def getTokenStartIndex(self, t):
        if t is not None:
            return t.getTokenStartIndex()
        return -1


    def getTokenStopIndex(self, t):
        if t is not None:
            return t.getTokenStopIndex()
        return -1


    def getText(self, t):
        if t is not None:
            return t.getText()
        return None


    def getType(self, t):
        if t is not None:
            return t.getType()
        return INVALID_TOKEN_TYPE


    def getToken(self, t):
        """
        What is the Token associated with this node?  If
        you are not using CommonTree, then you must
        override this in your own adaptor.
        """

        if not isinstance(t, CommonTree):
            return None  # no idea what to do
        return t.getToken()


    def getChild(self, t, i):
        if t is not None:
            return t.getChild(i)
        return None


    def getChildCount(self, t):
        if t is not None:
            return t.getChildCount()
        return 0


    def getParent(self, t):
        return t.getParent()


    def setParent(self, t, parent):
        t.setParent(parent)


    def getChildIndex(self, t):
        return t.getChildIndex()


    def setChildIndex(self, t, index):
        t.setChildIndex(index)


    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        # no-op for a None parent: can't replace what external code points at
        if parent is not None:
            parent.replaceChildren(startChildIndex, stopChildIndex, t)
############################################################################
#
# streams
#
# TreeNodeStream
# \- BaseTree
# \- CommonTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class TreeNodeStream(IntStream):
    """@brief A stream of tree nodes

    Abstract interface for accessing nodes from a tree of some kind.
    """

    # TreeNodeStream is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def get(self, i):
        """Get a tree node at an absolute index i; 0..n-1.

        If you don't want to buffer up nodes, then this method makes no
        sense for you.
        """

        raise NotImplementedError


    def LT(self, k):
        """
        Get tree node at current input pointer + i ahead where i=1 is next node.
        i<0 indicates nodes in the past.  So LT(-1) is previous node, but
        implementations are not required to provide results for k < -1.
        LT(0) is undefined.  For i>=n, return null.
        Return null for LT(0) and any index that results in an absolute address
        that is negative.

        This is analogous to the LT() method of the TokenStream, but this
        returns a tree node instead of a token.  Makes code gen identical
        for both parser and tree grammars. :)
        """

        raise NotImplementedError


    def getTreeSource(self):
        """
        Where is this stream pulling nodes from?  This is not the name, but
        the object that provides node objects.
        """

        raise NotImplementedError


    def getTokenStream(self):
        """
        If the tree associated with this stream was created from a TokenStream,
        you can specify it here.  Used to do rule $text attribute in tree
        parser.  Optional unless you use tree parser rule text attribute
        or output=template and rewrite=true options.
        """

        raise NotImplementedError


    def getTreeAdaptor(self):
        """
        What adaptor can tell me how to interpret/navigate nodes and
        trees.  E.g., get text of a node.
        """

        raise NotImplementedError


    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure.  When debugging we need unique nodes
        so we have to instantiate new ones.  When doing normal tree
        parsing, it's slow and a waste of memory to create unique
        navigation nodes.  Default should be false;
        """

        raise NotImplementedError


    def toString(self, start, stop):
        """
        Return the text of all nodes from start to stop, inclusive.
        If the stream does not buffer all the nodes then it can still
        walk recursively from start until stop.  You can always return
        null or "" too, but users should not access $ruleLabel.text in
        an action of course in that case.
        """

        raise NotImplementedError


    # REWRITING TREES (used by tree parser)
    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """
        Replace from start to stop child index of parent with t, which might
        be a list.  Number of children may be different
        after this call.  The stream is notified because it is walking the
        tree and might need to know you are monkeying with the underlying
        tree.  Also, it might be able to modify the node stream to avoid
        restreaming for future phases.

        If parent is null, don't do anything; must be at root of overall tree.
        Can't replace whatever points to the parent externally.  Do nothing.
        """

        raise NotImplementedError
class CommonTreeNodeStream(TreeNodeStream):
"""@brief A buffered stream of tree nodes.
Nodes can be from a tree of ANY kind.
This node stream sucks all nodes out of the tree specified in
the constructor during construction and makes pointers into
the tree using an array of Object pointers. The stream necessarily
includes pointers to DOWN and UP and EOF nodes.
This stream knows how to mark/release for backtracking.
This stream is most suitable for tree interpreters that need to
jump around a lot or for tree parsers requiring speed (at cost of memory).
There is some duplicated functionality here with UnBufferedTreeNodeStream
but just in bookkeeping, not tree walking etc...
@see UnBufferedTreeNodeStream
"""
def __init__(self, *args):
    """Initialize from either (tree) or (adaptor, tree)."""

    TreeNodeStream.__init__(self)

    if len(args) == 1:
        # single argument form: default to a CommonTreeAdaptor
        adaptor = CommonTreeAdaptor()
        tree = args[0]

    elif len(args) == 2:
        adaptor = args[0]
        tree = args[1]

    else:
        raise TypeError("Invalid arguments")

    # all these navigation nodes are shared and hence they
    # cannot contain any line/column info
    self.down = adaptor.createFromType(DOWN, "DOWN")
    self.up = adaptor.createFromType(UP, "UP")
    self.eof = adaptor.createFromType(EOF, "EOF")

    # The complete mapping from stream index to tree node.
    # This buffer includes pointers to DOWN, UP, and EOF nodes.
    # It is built upon ctor invocation.  The elements are type
    # Object as we don't know what the trees look like.

    # Load upon first need of the buffer so we can set token types
    # of interest for reverseIndexing.  Slows us down a wee bit to
    # do all of the if p==-1 testing everywhere though.
    self.nodes = []

    # Pull nodes from which tree?
    self.root = tree

    # If this tree (root) was created from a token stream, track it.
    self.tokens = None

    # What tree adaptor was used to build these trees
    self.adaptor = adaptor

    # Reuse same DOWN, UP navigation nodes unless this is true
    self.uniqueNavigationNodes = False

    # The index into the nodes list of the current node (next node
    # to consume).  If -1, nodes array not filled yet.
    self.p = -1

    # Track the last mark() call result value for use in rewind().
    self.lastMarker = None

    # Stack of indexes used for push/pop calls
    self.calls = []
def fillBuffer(self):
    """Walk tree with depth-first-search and fill nodes buffer.

    Don't do DOWN, UP nodes if it's a list (t is isNil).
    """

    self._fillBuffer(self.root)
    self.p = 0  # buffer of nodes initialized now
def _fillBuffer(self, t):
    """Recursive DFS helper: append t, its DOWN/UP markers and children.

    A nil root (flat list) contributes only its children, with no
    surrounding DOWN/UP navigation nodes.
    """

    isNilRoot = self.adaptor.isNil(t)
    if not isNilRoot:
        self.nodes.append(t)  # add this node

    childCount = self.adaptor.getChildCount(t)

    # DOWN/UP bracket the children of every real (non-nil) parent
    bracket = not isNilRoot and childCount > 0
    if bracket:
        self.addNavigationNode(DOWN)

    for childIdx in range(childCount):
        self._fillBuffer(self.adaptor.getChild(t, childIdx))

    if bracket:
        self.addNavigationNode(UP)
def getNodeIndex(self, node):
    """What is the stream index for node? 0..n-1
    Return -1 if node not found.
    """

    if self.p == -1:
        self.fillBuffer()

    # list.index compares with == just like the old manual scan did
    try:
        return self.nodes.index(node)
    except ValueError:
        return -1
def addNavigationNode(self, ttype):
    """
    As we flatten the tree, we use UP, DOWN nodes to represent
    the tree structure.  When debugging we need unique nodes
    so instantiate new ones when uniqueNavigationNodes is true.
    """

    if ttype == DOWN:
        navType, navText, shared = DOWN, "DOWN", self.down
    else:
        navType, navText, shared = UP, "UP", self.up

    if self.hasUniqueNavigationNodes():
        navNode = self.adaptor.createFromType(navType, navText)
    else:
        navNode = shared

    self.nodes.append(navNode)
def get(self, i):
    # Absolute access into the flattened node buffer; fills lazily.
    if self.p == -1:
        self.fillBuffer()

    return self.nodes[i]
def LT(self, k):
    """Lookahead: node k ahead (k=1 is next); negative k looks back.

    LT(0) is undefined and returns None; past the end returns the
    shared EOF node.
    """

    if self.p == -1:
        self.fillBuffer()

    if k == 0:
        return None

    if k < 0:
        return self.LB(-k)

    pos = self.p + k - 1
    if pos >= len(self.nodes):
        return self.eof
    return self.nodes[pos]
def getCurrentSymbol(self):
    # The next node to be consumed.
    return self.LT(1)
def LB(self, k):
    """Look backwards k nodes; None for k == 0 or before the start."""

    if k == 0 or self.p - k < 0:
        return None

    return self.nodes[self.p - k]
def getTreeSource(self):
    # The tree this stream was built from.
    return self.root
def getSourceName(self):
    # Delegates to the token stream; getTokenStream() may return None
    # if setTokenStream() was never called, which raises AttributeError.
    return self.getTokenStream().getSourceName()
def getTokenStream(self):
    """Return the token stream the tree was created from, if any."""
    return self.tokens
def setTokenStream(self, tokens):
    """Attach the token stream the tree was created from."""
    self.tokens = tokens
def getTreeAdaptor(self):
    """Return the adaptor used to navigate and create tree nodes."""
    return self.adaptor
def hasUniqueNavigationNodes(self):
    """True when fresh DOWN/UP nodes are minted per use (debugging mode)."""
    return self.uniqueNavigationNodes
def setUniqueNavigationNodes(self, uniqueNavigationNodes):
    """Enable/disable minting unique DOWN/UP navigation nodes."""
    self.uniqueNavigationNodes = uniqueNavigationNodes
def consume(self):
    """Advance past the current node (fills the buffer on first use)."""
    if self.p == -1:
        self.fillBuffer()
    self.p += 1
def LA(self, i):
    """Return the token type of the node i nodes ahead."""
    return self.adaptor.getType(self.LT(i))
def mark(self):
    """Record the current position and return it as a marker for rewind()."""
    if self.p == -1:
        self.fillBuffer()
    self.lastMarker = self.index()
    return self.lastMarker
def release(self, marker=None):
    """Release resources tied to marker -- nothing to do for this stream."""
    # no resources to release
    pass
def index(self):
    """Return the current node index (-1 until the buffer is filled)."""
    return self.p
def rewind(self, marker=None):
    """Return the stream to marker; defaults to the last mark()ed spot."""
    target = self.lastMarker if marker is None else marker
    self.seek(target)
def seek(self, index):
    """Jump the stream to an absolute node index."""
    if self.p == -1:
        self.fillBuffer()
    self.p = index
def push(self, index):
    """
    Make stream jump to a new location, saving old location.
    Switch back with pop().
    """
    self.calls.append(self.p)  # save current index
    self.seek(index)
def pop(self):
    """
    Seek back to the index saved by the most recent push() call and
    return that index.
    """
    target = self.calls.pop()
    self.seek(target)
    return target
def reset(self):
    """Rewind to the start of the buffer and clear mark/call state."""
    self.p = 0
    self.lastMarker = 0
    self.calls = []
def size(self):
    """Return the total number of nodes in the (lazily filled) buffer."""
    if self.p == -1:
        self.fillBuffer()
    return len(self.nodes)
# TREE REWRITE INTERFACE

def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
    """Replace children startChildIndex..stopChildIndex of parent with t.

    No-op when parent is None (nothing to rewrite).
    """
    if parent is not None:
        self.adaptor.replaceChildren(
            parent, startChildIndex, stopChildIndex, t
        )
def __str__(self):
    """Used for testing, just return the token type stream"""
    if self.p == -1:
        self.fillBuffer()
    # space-separated token types of every node in the buffer
    return ' '.join([str(self.adaptor.getType(node))
                     for node in self.nodes
                     ])
def toString(self, start, stop):
    """
    Return the text spanning nodes start..stop (inclusive).

    If a token stream is attached, its token text is dumped in order;
    otherwise the node buffer is walked and node texts concatenated.
    Returns None when either endpoint is None.
    """
    if start is None or stop is None:
        return None
    if self.p == -1:
        self.fillBuffer()
    # if we have the token stream, use that to dump text in order
    if self.tokens is not None:
        beginTokenIndex = self.adaptor.getTokenStartIndex(start)
        endTokenIndex = self.adaptor.getTokenStopIndex(stop)
        # if it's a tree, use start/stop index from start node
        # else use token range from start/stop nodes
        if self.adaptor.getType(stop) == UP:
            endTokenIndex = self.adaptor.getTokenStopIndex(start)
        elif self.adaptor.getType(stop) == EOF:
            endTokenIndex = self.size() - 2  # don't use EOF
        return self.tokens.toString(beginTokenIndex, endTokenIndex)
    # walk nodes looking for start
    i, t = 0, None
    for i, t in enumerate(self.nodes):
        if t == start:
            break
    # now walk until we see stop, filling string buffer with text
    buf = []
    t = self.nodes[i]
    while t != stop:
        text = self.adaptor.getText(t)
        if text is None:
            # BUGFIX: getType() returns an int token type; the original
            # bare concatenation " " + <int> raised TypeError here.
            text = " " + str(self.adaptor.getType(t))
        buf.append(text)
        i += 1
        t = self.nodes[i]
    # include stop node too
    text = self.adaptor.getText(stop)
    if text is None:
        text = " " + str(self.adaptor.getType(stop))
    buf.append(text)
    return ''.join(buf)
## iterator interface

def __iter__(self):
    """Yield every node in buffer order.

    Generator semantics: the buffer is filled lazily on the first
    next(), not when __iter__ is called.
    """
    if self.p == -1:
        self.fillBuffer()
    for node in self.nodes:
        yield node
#############################################################################
#
# tree parser
#
#############################################################################
class TreeParser(BaseRecognizer):
    """@brief Baseclass for generated tree parsers.

    A parser for a stream of tree nodes.  "tree grammars" result in a
    subclass of this.  All the error reporting and recovery is shared
    with Parser via the BaseRecognizer superclass.
    """

    def __init__(self, input, state=None):
        BaseRecognizer.__init__(self, state)
        self.input = None
        self.setTreeNodeStream(input)

    def reset(self):
        BaseRecognizer.reset(self)  # reset all recognizer state variables
        if self.input is not None:
            self.input.seek(0)  # rewind the input

    def setTreeNodeStream(self, input):
        """Set the input stream"""
        self.input = input

    def getTreeNodeStream(self):
        """Return the tree node stream being parsed."""
        return self.input

    def getSourceName(self):
        return self.input.getSourceName()

    def getCurrentInputSymbol(self, input):
        return input.LT(1)

    def getMissingSymbol(self, input, e, expectedTokenType, follow):
        # Conjure up a placeholder node for error recovery.
        tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
        return CommonTree(CommonToken(type=expectedTokenType, text=tokenText))

    def matchAny(self, ignore):  # ignore stream, copy of this.input
        """
        Match '.' in tree parser has special meaning.  Skip node or
        entire tree if node has children.  If children, scan until
        corresponding UP node.
        """
        self._state.errorRecovery = False

        look = self.input.LT(1)
        if self.input.getTreeAdaptor().getChildCount(look) == 0:
            self.input.consume()  # not subtree, consume 1 node and return
            return

        # current node is a subtree, skip to corresponding UP.
        # must count nesting level to get right UP
        level = 0
        tokenType = self.input.getTreeAdaptor().getType(look)
        while tokenType != EOF and not (tokenType == UP and level == 0):
            self.input.consume()
            look = self.input.LT(1)
            tokenType = self.input.getTreeAdaptor().getType(look)
            if tokenType == DOWN:
                level += 1
            elif tokenType == UP:
                level -= 1

        self.input.consume()  # consume UP

    def mismatch(self, input, ttype, follow):
        """
        We have DOWN/UP nodes in the stream that have no line info; override.
        plus we want to alter the exception type.  Don't try to recover
        from tree parser errors inline...
        """
        raise MismatchedTreeNodeException(ttype, input)

    def getErrorHeader(self, e):
        """
        Prefix error message with the grammar name because message is
        always intended for the programmer because the parser built
        the input tree not the user.
        """
        return (self.getGrammarFileName() +
                ": node from %sline %s:%s"
                % (['', "after "][e.approximateLineInfo],
                   e.line,
                   e.charPositionInLine
                   )
                )

    def getErrorMessage(self, e, tokenNames):
        """
        Tree parsers parse nodes they usually have a token object as
        payload. Set the exception token and do the default behavior.
        """
        if isinstance(self, TreeParser):
            adaptor = e.input.getTreeAdaptor()
            e.token = adaptor.getToken(e.node)
            if e.token is None:  # could be an UP/DOWN node
                # BUGFIX: was "is not None", which clobbered real tokens
                # and left navigation nodes with no token at all.  Per the
                # upstream ANTLR semantics, synthesize a token only when
                # the node carries none.
                e.token = CommonToken(
                    type=adaptor.getType(e.node),
                    text=adaptor.getText(e.node)
                    )

        return BaseRecognizer.getErrorMessage(self, e, tokenNames)

    def traceIn(self, ruleName, ruleIndex):
        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))

    def traceOut(self, ruleName, ruleIndex):
        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
#############################################################################
#
# streams for rule rewriting
#
#############################################################################
class RewriteRuleElementStream(object):
    """@brief Internal helper class.

    A generic list of elements tracked in an alternative to be used in
    a -> rewrite rule.  We need to subclass to fill in the next() method,
    which returns either an AST node wrapped around a token payload or
    an existing subtree.

    Once you start next()ing, do not try to add more elements.  It will
    break the cursor tracking I believe.

    @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
    @see org.antlr.runtime.tree.RewriteRuleTokenStream

    TODO: add mechanism to detect/puke on modification after reading from
    stream
    """

    def __init__(self, adaptor, elementDescription, elements=None):
        # Cursor 0..n-1. If singleElement!=null, cursor is 0 until you next(),
        # which bumps it to 1 meaning no more elements.
        self.cursor = 0

        # Track single elements w/o creating a list. Upon 2nd add, alloc list
        self.singleElement = None

        # The list of tokens or subtrees we are tracking
        self.elements = None

        # Once a node / subtree has been used in a stream, it must be dup'd
        # from then on. Streams are reset after subrules so that the streams
        # can be reused in future subrules. So, reset must set a dirty bit.
        # If dirty, then next() always returns a dup.
        self.dirty = False

        # The element or stream description; usually has name of the token or
        # rule reference that this list tracks. Can include rulename too, but
        # the exception would track that info.
        self.elementDescription = elementDescription

        self.adaptor = adaptor

        if isinstance(elements, (list, tuple)):
            # Create a stream, but feed off an existing list
            self.singleElement = None
            self.elements = elements
        else:
            # Create a stream with one element (add() ignores None)
            self.add(elements)

    def reset(self):
        """
        Reset the condition of this stream so that it appears we have
        not consumed any of its elements.  Elements themselves are
        untouched.  Once we reset the stream, any future use will need
        duplicates.  Set the dirty bit.
        """
        self.cursor = 0
        self.dirty = True

    def add(self, el):
        """Track another element; upgrades single-element storage to a list."""
        if el is None:
            return

        if self.elements is not None:  # if in list, just add
            self.elements.append(el)
            return

        if self.singleElement is None:  # no elements yet, track w/o list
            self.singleElement = el
            return

        # adding 2nd element, move to list
        self.elements = []
        self.elements.append(self.singleElement)
        self.singleElement = None
        self.elements.append(el)

    def nextTree(self):
        """
        Return the next element in the stream.  If out of elements, throw
        an exception unless size()==1.  If size is 1, then return elements[0].

        Return a duplicate node/subtree if stream is out of elements and
        size==1.  If we've already used the element, dup (dirty bit set).
        """
        if (self.dirty
            or (self.cursor >= len(self) and len(self) == 1)
            ):
            # if out of elements and size is 1, dup
            el = self._next()
            return self.dup(el)

        # test size above then fetch
        el = self._next()
        return el

    def _next(self):
        """
        do the work of getting the next element, making sure that it's
        a tree node or subtree.  Deal with the optimization of single-
        element list versus list of size > 1.  Throw an exception
        if the stream is empty or we're out of elements and size>1.
        protected so you can override in a subclass if necessary.
        """
        if len(self) == 0:
            raise RewriteEmptyStreamException(self.elementDescription)

        if self.cursor >= len(self):  # out of elements?
            if len(self) == 1:  # if size is 1, it's ok; return and we'll dup
                return self.toTree(self.singleElement)

            # out of elements and size was not 1, so we can't dup
            raise RewriteCardinalityException(self.elementDescription)

        # we have elements
        if self.singleElement is not None:
            self.cursor += 1  # move cursor even for single element list
            return self.toTree(self.singleElement)

        # must have more than one in list, pull from elements
        o = self.toTree(self.elements[self.cursor])
        self.cursor += 1
        return o

    def dup(self, el):
        """
        When constructing trees, sometimes we need to dup a token or AST
        subtree.  Dup'ing a token means just creating another AST node
        around it.  For trees, you must call the adaptor.dupTree() unless
        the element is for a tree root; then it must be a node dup.
        """
        raise NotImplementedError

    def toTree(self, el):
        """
        Ensure stream emits trees; tokens must be converted to AST nodes.
        AST nodes can be passed through unmolested.
        """
        return el

    def hasNext(self):
        """True while unread elements remain in the stream."""
        return ( (self.singleElement is not None and self.cursor < 1)
                 or (self.elements is not None
                     and self.cursor < len(self.elements)
                     )
                 )

    def size(self):
        """Number of tracked elements: 0, 1, or len(elements)."""
        if self.singleElement is not None:
            return 1

        if self.elements is not None:
            return len(self.elements)

        return 0

    __len__ = size

    def getDescription(self):
        """Deprecated.  Directly access elementDescription attribute"""
        return self.elementDescription
class RewriteRuleTokenStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Tracks token elements on the left of a -> rewrite rule.
    """

    def toTree(self, el):
        # Don't convert to a tree unless they explicitly call nextTree.
        # This way we can do hetero tree nodes in rewrite.
        return el

    def nextNode(self):
        """Wrap the next token in a freshly created AST node."""
        t = self._next()
        return self.adaptor.createWithPayload(t)

    def nextToken(self):
        """Return the next raw token."""
        return self._next()

    def dup(self, el):
        # Tokens are never dup'd; nextNode() always mints a fresh node.
        raise TypeError("dup can't be called for a token stream.")
class RewriteRuleSubtreeStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Tracks subtree elements on the left of a -> rewrite rule.
    """

    def nextNode(self):
        """
        Treat next element as a single node even if it's a subtree.
        This is used instead of next() when the result has to be a
        tree root node.  Also prevents us from duplicating recently-added
        children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
        must dup the type node, but ID has been added.

        Referencing a rule result twice is ok; dup entire tree as
        we can't be adding trees as root; e.g., expr expr.

        Hideous code duplication here with super.next().  Can't think of
        a proper way to refactor.  This needs to always call dup node
        and super.next() doesn't know which to call: dup node or dup tree.
        """
        if (self.dirty
            or (self.cursor >= len(self) and len(self) == 1)
            ):
            # if out of elements and size is 1, dup (at most a single node
            # since this is for making root nodes).
            el = self._next()
            return self.adaptor.dupNode(el)

        # test size above then fetch
        el = self._next()
        return el

    def dup(self, el):
        """Duplicate an entire subtree via the adaptor."""
        return self.adaptor.dupTree(el)
class RewriteRuleNodeStream(RewriteRuleElementStream):
    """
    Queues up nodes matched on left side of -> in a tree parser. This is
    the analog of RewriteRuleTokenStream for normal parsers.
    """

    def nextNode(self):
        """Return the next node (already dup'd by toTree)."""
        return self._next()

    def toTree(self, el):
        # every node handed out is a duplicate of the matched node
        return self.adaptor.dupNode(el)

    def dup(self, el):
        # we dup every node, so don't have to worry about calling dup; short-
        # circuited next() so it doesn't call.
        raise TypeError("dup can't be called for a node stream.")
class TreeRuleReturnScope(RuleReturnScope):
    """
    This is identical to the ParserRuleReturnScope except that
    the start property is a tree node, not a Token object,
    when you are parsing trees.  To be generic the tree node types
    have to be Object.
    """

    def __init__(self):
        # first node matched by the rule
        self.start = None
        # AST produced by the rule, if any
        self.tree = None

    def getStart(self):
        """Return the first node matched by this rule."""
        return self.start

    def getTree(self):
        """Return the AST built by this rule, or None."""
        return self.tree
|
opennetworkinglab/spring-open-cli | refs/heads/master | sdncon/coreui/templatetags/__init__.py | 86 | #
# Copyright (c) 2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
|
nguyenkims/projecteuler-python | refs/heads/master | src/p82.py | 1 | A=[]
limit = 10 ** 6  # effectively "infinity" for the minimisation loops below
def fillA():
    """Read 'matrix.txt' (comma-separated integer rows) into global A.

    A is filled column-major: A[i][j] is row j, column i of the file,
    so A[m][k] elsewhere addresses column m, row k.
    NOTE(review): only the first len(B) values per row are used, i.e.
    a square matrix is assumed -- confirm (80x80 for Project Euler 82).
    """
    B = []
    inp = file('matrix.txt')  # Python 2 builtin; would be open() in Python 3
    t = inp.readline()
    while t != "":
        # K = t.strip().split()
        K = t.strip().split(',')
        B.append(K)
        t = inp.readline()
    # print B
    for i in range(0, len(B)):
        A.append([])
    for b in B:
        for i in range(0, len(B)):
            # append row b's i-th value to column list A[i] (transpose)
            A[i].append(int(b[i]))
    # print A
    print len(A)
fillA()
L = len(A)  # matrix dimension (square assumed; see fillA)
print 'L=', L
def distance(m, i, n, j):
    '''Return the minimal path sum from A[m][i] to A[n][j], endpoints
    included, moving through columns m..n.

    m, n are column numbers and i, j are row numbers.
    Returns -1 for the invalid query m > n.
    NOTE(review): the divide-and-conquer recursion recomputes
    overlapping subproblems without memoisation -- presumably slow for
    large spans; confirm runtime is acceptable.
    '''
    if m > n:
        return -1
    elif m == n:
        # A[m][i] -> A[m][j]: straight vertical walk within one column
        s = 0
        if i < j:
            for k in range(i, j + 1):
                s += A[m][k]
        else:
            for k in range(j, i + 1):
                s += A[m][k]
        return s
    elif m + 1 == n:
        # adjacent columns: cross first then walk, or walk then cross
        t1 = A[m][i] + distance(n, i, n, j)
        t2 = distance(m, i, m, j) + A[n][j]
        if t1 < t2:
            return t1
        else:
            return t2
    else:
        # split at the middle column; subtract A[middle][k] because both
        # halves include the shared middle cell
        minimum = limit
        middle = int((m + n) / 2)
        for k in range(0, len(A[m])):
            s = distance(m, i, middle, k) + distance(middle, k, n, j) - A[middle][k]
            if s < minimum:
                minimum = s
        return minimum
def fillColumn(n):
    '''Return, for each row k, the minimal path sum from any cell of
    column 0 to the cell at (column n, row k).

    Recurses on the previous column; the "- A[n-1][i]" term removes the
    start cell that distance() counts a second time.
    '''
    l = []
    if n == 0:
        # base case: cost of starting in column 0 is the cell itself
        for i in range(0, L):
            l.append(A[0][i])
        return l
    else:
        l1 = fillColumn(n - 1)
        for k in range(0, L):
            minimum = limit
            for i in range(0, L):
                t = l1[i] + distance(n - 1, i, n, k) - A[n - 1][i]
                if t < minimum:
                    minimum = t
            l.append(minimum)
        return l
X = fillColumn(79)  # minimal path sums into the last column (index 79)
print X, min(X)  # min(X) is the answer -- presumably PE 82; verify
|
nttks/jenkins-test | refs/heads/gacco/birch | common/lib/xmodule/xmodule/tests/test_xml_module.py | 12 | # disable missing docstring
# pylint: disable=missing-docstring
import unittest
from mock import Mock
from nose.tools import assert_equals, assert_not_equals, assert_true, assert_false, assert_in, assert_not_in # pylint: disable=no-name-in-module
from xblock.field_data import DictFieldData
from xblock.fields import Scope, String, Dict, Boolean, Integer, Float, Any, List
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.fields import Date, Timedelta, RelativeTime
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, InheritanceMixin, InheritingFieldData
from xmodule.xml_module import XmlDescriptor, serialize_field, deserialize_field
from xmodule.course_module import CourseDescriptor
from xmodule.seq_module import SequenceDescriptor
from xmodule.x_module import XModuleMixin
from xmodule.tests import get_test_descriptor_system
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests.xml.factories import CourseFactory, SequenceFactory, ProblemFactory
class CrazyJsonString(String):
    """String field whose JSON form appends ' JSON' -- lets tests verify
    that field values are run through to_json()."""
    def to_json(self, value):
        return value + " JSON"
class TestFields(object):
    """Mixin declaring a spread of field types for the metadata tests below."""
    # Will be returned by editable_metadata_fields.
    max_attempts = Integer(scope=Scope.settings, default=1000, values={'min': 1, 'max': 10})
    # Will not be returned by editable_metadata_fields because filtered out by non_editable_metadata_fields.
    due = Date(scope=Scope.settings)
    # Will not be returned by editable_metadata_fields because is not Scope.settings.
    student_answers = Dict(scope=Scope.user_state)
    # Will be returned, and can override the inherited value from XModule.
    display_name = String(scope=Scope.settings, default='local default', display_name='Local Display Name',
                          help='local help')
    # Used for testing select type, effect of to_json method
    string_select = CrazyJsonString(
        scope=Scope.settings,
        default='default value',
        values=[{'display_name': 'first', 'value': 'value a'},
                {'display_name': 'second', 'value': 'value b'}]
    )
    # Inherited field, used by test_inherited_field
    showanswer = InheritanceMixin.showanswer
    # Used for testing select type
    float_select = Float(scope=Scope.settings, default=.999, values=[1.23, 0.98])
    # Used for testing float type
    float_non_select = Float(scope=Scope.settings, default=.999, values={'min': 0, 'step': .3})
    # Used for testing that Booleans get mapped to select type
    boolean_select = Boolean(scope=Scope.settings)
    # Used for testing Lists
    list_field = List(scope=Scope.settings, default=[])
class InheritingFieldDataTest(unittest.TestCase):
    """Tests of InheritingFieldData."""

    class TestableInheritingXBlock(XmlDescriptor):
        """An XBlock we can use in these tests."""
        # 'inherited' is listed in inheritable_names in setUp; 'not_inherited' is not.
        inherited = String(scope=Scope.settings, default="the default")
        not_inherited = String(scope=Scope.settings, default="nothing")

    def setUp(self):
        self.system = get_test_descriptor_system()
        self.all_blocks = {}
        self.system.get_block = self.all_blocks.get
        self.field_data = InheritingFieldData(
            inheritable_names=['inherited'],
            kvs=DictKeyValueStore({}),
        )

    def get_a_block(self, usage_id=None):
        """Construct an XBlock for testing with."""
        scope_ids = Mock()
        if usage_id is None:
            usage_id = "_auto%d" % len(self.all_blocks)
        scope_ids.usage_id = usage_id
        block = self.system.construct_xblock_from_class(
            self.TestableInheritingXBlock,
            field_data=self.field_data,
            scope_ids=scope_ids,
        )
        self.all_blocks[usage_id] = block
        return block

    def test_default_value(self):
        # Blocks with nothing set will return the fields' defaults.
        block = self.get_a_block()
        self.assertEqual(block.inherited, "the default")
        self.assertEqual(block.not_inherited, "nothing")

    def test_set_value(self):
        # If you set a value, that's what you get back.
        block = self.get_a_block()
        block.inherited = "Changed!"
        block.not_inherited = "New Value!"
        self.assertEqual(block.inherited, "Changed!")
        self.assertEqual(block.not_inherited, "New Value!")

    def test_inherited(self):
        # A child will get a value inherited from the parent.
        parent = self.get_a_block(usage_id="parent")
        parent.inherited = "Changed!"
        self.assertEqual(parent.inherited, "Changed!")
        child = self.get_a_block(usage_id="child")
        child.parent = "parent"
        self.assertEqual(child.inherited, "Changed!")

    def test_inherited_across_generations(self):
        # A child will get a value inherited from a great-grandparent.
        parent = self.get_a_block(usage_id="parent")
        parent.inherited = "Changed!"
        self.assertEqual(parent.inherited, "Changed!")
        for child_num in range(10):
            usage_id = "child_{}".format(child_num)
            child = self.get_a_block(usage_id=usage_id)
            child.parent = "parent"
            self.assertEqual(child.inherited, "Changed!")

    def test_not_inherited(self):
        # Fields not in the inherited_names list won't be inherited.
        parent = self.get_a_block(usage_id="parent")
        parent.not_inherited = "Changed!"
        self.assertEqual(parent.not_inherited, "Changed!")
        child = self.get_a_block(usage_id="child")
        child.parent = "parent"
        self.assertEqual(child.not_inherited, "nothing")
class EditableMetadataFieldsTest(unittest.TestCase):
    """Tests of the editable_metadata_fields property on XML descriptors."""

    def test_display_name_field(self):
        editable_fields = self.get_xml_editable_fields(DictFieldData({}))
        # Tests that the xblock fields (currently tags and name) get filtered out.
        # Also tests that xml_attributes is filtered out of XmlDescriptor.
        self.assertEqual(1, len(editable_fields), editable_fields)
        self.assert_field_values(
            editable_fields, 'display_name', XModuleMixin.display_name,
            explicitly_set=False, value=None, default_value=None
        )

    def test_override_default(self):
        # Tests that explicitly_set is correct when a value overrides the default (not inheritable).
        editable_fields = self.get_xml_editable_fields(DictFieldData({'display_name': 'foo'}))
        self.assert_field_values(
            editable_fields, 'display_name', XModuleMixin.display_name,
            explicitly_set=True, value='foo', default_value=None
        )

    def test_integer_field(self):
        descriptor = self.get_descriptor(DictFieldData({'max_attempts': '7'}))
        editable_fields = descriptor.editable_metadata_fields
        self.assertEqual(8, len(editable_fields))
        self.assert_field_values(
            editable_fields, 'max_attempts', TestFields.max_attempts,
            explicitly_set=True, value=7, default_value=1000, type='Integer',
            options=TestFields.max_attempts.values
        )
        self.assert_field_values(
            editable_fields, 'display_name', TestFields.display_name,
            explicitly_set=False, value='local default', default_value='local default'
        )
        editable_fields = self.get_descriptor(DictFieldData({})).editable_metadata_fields
        self.assert_field_values(
            editable_fields, 'max_attempts', TestFields.max_attempts,
            explicitly_set=False, value=1000, default_value=1000, type='Integer',
            options=TestFields.max_attempts.values
        )

    def test_inherited_field(self):
        kvs = InheritanceKeyValueStore(initial_values={}, inherited_settings={'showanswer': 'inherited'})
        model_data = KvsFieldData(kvs)
        descriptor = self.get_descriptor(model_data)
        editable_fields = descriptor.editable_metadata_fields
        self.assert_field_values(
            editable_fields, 'showanswer', InheritanceMixin.showanswer,
            explicitly_set=False, value='inherited', default_value='inherited'
        )
        # Mimic the case where display_name WOULD have been inherited, except we explicitly set it.
        kvs = InheritanceKeyValueStore(
            initial_values={'showanswer': 'explicit'},
            inherited_settings={'showanswer': 'inheritable value'}
        )
        model_data = KvsFieldData(kvs)
        descriptor = self.get_descriptor(model_data)
        editable_fields = descriptor.editable_metadata_fields
        self.assert_field_values(
            editable_fields, 'showanswer', InheritanceMixin.showanswer,
            explicitly_set=True, value='explicit', default_value='inheritable value'
        )

    def test_type_and_options(self):
        # test_display_name_field verifies that a String field is of type "Generic".
        # test_integer_field verifies that a Integer field is of type "Integer".
        descriptor = self.get_descriptor(DictFieldData({}))
        editable_fields = descriptor.editable_metadata_fields
        # Tests for select
        self.assert_field_values(
            editable_fields, 'string_select', TestFields.string_select,
            explicitly_set=False, value='default value', default_value='default value',
            type='Select', options=[{'display_name': 'first', 'value': 'value a JSON'},
                                    {'display_name': 'second', 'value': 'value b JSON'}]
        )
        self.assert_field_values(
            editable_fields, 'float_select', TestFields.float_select,
            explicitly_set=False, value=.999, default_value=.999,
            type='Select', options=[1.23, 0.98]
        )
        self.assert_field_values(
            editable_fields, 'boolean_select', TestFields.boolean_select,
            explicitly_set=False, value=None, default_value=None,
            type='Select', options=[{'display_name': "True", "value": True}, {'display_name': "False", "value": False}]
        )
        # Test for float
        self.assert_field_values(
            editable_fields, 'float_non_select', TestFields.float_non_select,
            explicitly_set=False, value=.999, default_value=.999,
            type='Float', options={'min': 0, 'step': .3}
        )
        self.assert_field_values(
            editable_fields, 'list_field', TestFields.list_field,
            explicitly_set=False, value=[], default_value=[],
            type='List'
        )

    # Start of helper methods
    def get_xml_editable_fields(self, field_data):
        """Return editable_metadata_fields for a bare XmlDescriptor."""
        runtime = get_test_descriptor_system()
        return runtime.construct_xblock_from_class(
            XmlDescriptor,
            scope_ids=Mock(),
            field_data=field_data,
        ).editable_metadata_fields

    def get_descriptor(self, field_data):
        """Build a descriptor class exposing TestFields on top of XmlDescriptor."""
        class TestModuleDescriptor(TestFields, XmlDescriptor):
            @property
            def non_editable_metadata_fields(self):
                non_editable_fields = super(TestModuleDescriptor, self).non_editable_metadata_fields
                non_editable_fields.append(TestModuleDescriptor.due)
                return non_editable_fields

        system = get_test_descriptor_system()
        system.render_template = Mock(return_value="<div>Test Template HTML</div>")
        return system.construct_xblock_from_class(TestModuleDescriptor, field_data=field_data, scope_ids=Mock())

    def assert_field_values(self, editable_fields, name, field, explicitly_set, value, default_value,
                            type='Generic', options=None):
        """Assert one editable_metadata_fields entry matches expectations.

        BUGFIX: `options` previously used the mutable default argument
        `options=[]`; replaced with the None-sentinel idiom.  Behavior is
        unchanged for all existing callers.
        """
        if options is None:
            options = []
        test_field = editable_fields[name]
        self.assertEqual(field.name, test_field['field_name'])
        self.assertEqual(field.display_name, test_field['display_name'])
        self.assertEqual(field.help, test_field['help'])
        self.assertEqual(field.to_json(value), test_field['value'])
        self.assertEqual(field.to_json(default_value), test_field['default_value'])
        self.assertEqual(options, test_field['options'])
        self.assertEqual(type, test_field['type'])
        self.assertEqual(explicitly_set, test_field['explicitly_set'])
class TestSerialize(unittest.TestCase):
    """ Tests the serialize method, which is not dependent on type. """
    def test_serialize(self):
        # JSON-encodable values get their JSON encoding; arbitrary
        # strings pass through unchanged.
        assert_equals('null', serialize_field(None))
        assert_equals('-2', serialize_field(-2))
        assert_equals('2', serialize_field('2'))
        assert_equals('-3.41', serialize_field(-3.41))
        assert_equals('2.589', serialize_field('2.589'))
        assert_equals('false', serialize_field(False))
        assert_equals('false', serialize_field('false'))
        assert_equals('fAlse', serialize_field('fAlse'))
        assert_equals('hat box', serialize_field('hat box'))
        assert_equals('{"bar": "hat", "frog": "green"}', serialize_field({'bar': 'hat', 'frog': 'green'}))
        assert_equals('[3.5, 5.6]', serialize_field([3.5, 5.6]))
        assert_equals('["foo", "bar"]', serialize_field(['foo', 'bar']))
        assert_equals('2012-12-31T23:59:59Z', serialize_field("2012-12-31T23:59:59Z"))
        assert_equals('1 day 12 hours 59 minutes 59 seconds',
                      serialize_field("1 day 12 hours 59 minutes 59 seconds"))
class TestDeserialize(unittest.TestCase):
    """Base class with helpers for deserialize_field tests.

    Subclasses set `test_field` to the xblock field class under test.
    """

    def assertDeserializeEqual(self, expected, arg):
        """
        Asserts the result of deserialize_field.
        """
        assert_equals(expected, deserialize_field(self.test_field(), arg))

    def assertDeserializeNonString(self):
        """
        Asserts input value is returned for None or something that is not a string.
        For all types, 'null' is also always returned as None.
        """
        self.assertDeserializeEqual(None, None)
        self.assertDeserializeEqual(3.14, 3.14)
        self.assertDeserializeEqual(True, True)
        self.assertDeserializeEqual([10], [10])
        self.assertDeserializeEqual({}, {})
        self.assertDeserializeEqual([], [])
        self.assertDeserializeEqual(None, 'null')
class TestDeserializeInteger(TestDeserialize):
    """ Tests deserialize as related to Integer type. """
    test_field = Integer

    def test_deserialize(self):
        self.assertDeserializeEqual(-2, '-2')
        self.assertDeserializeEqual("450", '"450"')
        # False can be parsed as a int (converts to 0)
        self.assertDeserializeEqual(False, 'false')
        # True can be parsed as a int (converts to 1)
        self.assertDeserializeEqual(True, 'true')
        # -2.78 can be converted to int, so the string will be deserialized
        self.assertDeserializeEqual(-2.78, '-2.78')

    def test_deserialize_unsupported_types(self):
        self.assertDeserializeEqual('[3]', '[3]')
        # '-2.78' (quoted) cannot be converted to int, so input value is returned
        self.assertDeserializeEqual('"-2.78"', '"-2.78"')
        # 'false' cannot be converted to int, so input value is returned
        self.assertDeserializeEqual('"false"', '"false"')
        self.assertDeserializeNonString()
class TestDeserializeFloat(TestDeserialize):
    """ Tests deserialize as related to Float type. """
    test_field = Float

    def test_deserialize(self):
        self.assertDeserializeEqual(-2, '-2')
        self.assertDeserializeEqual("450", '"450"')
        self.assertDeserializeEqual(-2.78, '-2.78')
        self.assertDeserializeEqual("0.45", '"0.45"')
        # False can be parsed as a float (converts to 0)
        self.assertDeserializeEqual(False, 'false')
        # True can be parsed as a float (converts to 1)
        self.assertDeserializeEqual(True, 'true')

    def test_deserialize_unsupported_types(self):
        self.assertDeserializeEqual('[3]', '[3]')
        # 'false' cannot be converted to float, so input value is returned
        self.assertDeserializeEqual('"false"', '"false"')
        self.assertDeserializeNonString()
class TestDeserializeBoolean(TestDeserialize):
    """ Tests deserialize as related to Boolean type. """
    test_field = Boolean

    def test_deserialize(self):
        # json.loads converts the value to Python bool
        self.assertDeserializeEqual(False, 'false')
        self.assertDeserializeEqual(True, 'true')
        # json.loads fails, string value is returned.
        self.assertDeserializeEqual('False', 'False')
        self.assertDeserializeEqual('True', 'True')
        # json.loads deserializes as a string
        self.assertDeserializeEqual('false', '"false"')
        self.assertDeserializeEqual('fAlse', '"fAlse"')
        self.assertDeserializeEqual("TruE", '"TruE"')
        # -2.78 can be converted to a bool, so the string will be deserialized
        self.assertDeserializeEqual(-2.78, '-2.78')
        self.assertDeserializeNonString()
class TestDeserializeString(TestDeserialize):
    """ Tests deserialize as related to String type. """
    test_field = String

    def test_deserialize(self):
        self.assertDeserializeEqual('hAlf', '"hAlf"')
        self.assertDeserializeEqual('false', '"false"')
        self.assertDeserializeEqual('single quote', 'single quote')

    def test_deserialize_unsupported_types(self):
        # non-string JSON values are passed through unchanged
        self.assertDeserializeEqual('3.4', '3.4')
        self.assertDeserializeEqual('false', 'false')
        self.assertDeserializeEqual('2', '2')
        self.assertDeserializeEqual('[3]', '[3]')
        self.assertDeserializeNonString()
class TestDeserializeAny(TestDeserialize):
    """ Tests deserialize as related to Any type. """
    test_field = Any

    def test_deserialize(self):
        self.assertDeserializeEqual('hAlf', '"hAlf"')
        self.assertDeserializeEqual('false', '"false"')
        self.assertDeserializeEqual({'bar': 'hat', 'frog': 'green'}, '{"bar": "hat", "frog": "green"}')
        self.assertDeserializeEqual([3.5, 5.6], '[3.5, 5.6]')
        # malformed JSON falls back to the raw string
        self.assertDeserializeEqual('[', '[')
        self.assertDeserializeEqual(False, 'false')
        self.assertDeserializeEqual(3.4, '3.4')
        self.assertDeserializeNonString()
class TestDeserializeList(TestDeserialize):
    """ Tests deserialize as related to List type. """
    test_field = List

    def test_deserialize(self):
        self.assertDeserializeEqual(['foo', 'bar'], '["foo", "bar"]')
        self.assertDeserializeEqual([3.5, 5.6], '[3.5, 5.6]')
        self.assertDeserializeEqual([], '[]')

    def test_deserialize_unsupported_types(self):
        # non-list JSON values are passed through unchanged
        self.assertDeserializeEqual('3.4', '3.4')
        self.assertDeserializeEqual('false', 'false')
        self.assertDeserializeEqual('2', '2')
        self.assertDeserializeNonString()
class TestDeserializeDate(TestDeserialize):
    """ Tests deserialize as related to Date type. """
    test_field = Date

    def test_deserialize(self):
        # both bare and JSON-quoted ISO timestamps deserialize to the string
        self.assertDeserializeEqual('2012-12-31T23:59:59Z', "2012-12-31T23:59:59Z")
        self.assertDeserializeEqual('2012-12-31T23:59:59Z', '"2012-12-31T23:59:59Z"')
        self.assertDeserializeNonString()
class TestDeserializeTimedelta(TestDeserialize):
    """ Deserialization tests for the Timedelta field type. """
    test_field = Timedelta

    def test_deserialize(self):
        # Bare and JSON-quoted duration strings both deserialize to the
        # bare duration string.
        duration = '1 day 12 hours 59 minutes 59 seconds'
        for serialized in (duration, '"%s"' % duration):
            self.assertDeserializeEqual(duration, serialized)
        self.assertDeserializeNonString()
class TestDeserializeRelativeTime(TestDeserialize):
    """ Tests deserialize as related to RelativeTime type. """
    test_field = RelativeTime
    def test_deserialize(self):
        """
        There is no check for
        self.assertDeserializeEqual('10:20:30', '10:20:30')
        self.assertDeserializeNonString()
        because these two tests work only because json.loads fires exception,
        and xml_module.deserialized_field catches it and returns same value,
        so there is nothing field-specific here.
        But other modules do it, so I'm leaving this comment for PR reviewers.
        """
        # test that from_json produces no exceptions
        self.assertDeserializeEqual('10:20:30', '"10:20:30"')
class TestXmlAttributes(XModuleXmlImportTest):
    """
    Tests for how XML attributes and policy keys are routed during import:
    known attributes become real fields, unknown ones are preserved in the
    catch-all ``xml_attributes`` dict.
    """
    def test_unknown_attribute(self):
        """An attribute with no matching field lands in xml_attributes."""
        assert_false(hasattr(CourseDescriptor, 'unknown_attr'))
        course = self.process_xml(CourseFactory.build(unknown_attr='value'))
        assert_false(hasattr(course, 'unknown_attr'))
        assert_equals('value', course.xml_attributes['unknown_attr'])
    def test_known_attribute(self):
        """An attribute matching a declared field is parsed into that field."""
        assert_true(hasattr(CourseDescriptor, 'show_chat'))
        course = self.process_xml(CourseFactory.build(show_chat='true'))
        assert_true(course.show_chat)
        assert_not_in('show_chat', course.xml_attributes)
    def test_rerandomize_in_policy(self):
        """A policy key declared on InheritanceMixin becomes a real field."""
        # Rerandomize isn't a basic attribute of Sequence
        assert_false(hasattr(SequenceDescriptor, 'rerandomize'))
        root = SequenceFactory.build(policy={'rerandomize': 'never'})
        ProblemFactory.build(parent=root)
        seq = self.process_xml(root)
        # Rerandomize is added to the constructed sequence via the InheritanceMixin
        assert_equals('never', seq.rerandomize)
        # Rerandomize is a known value coming from policy, and shouldn't appear
        # in xml_attributes
        assert_not_in('rerandomize', seq.xml_attributes)
    def test_attempts_in_policy(self):
        """An unknown policy key is preserved in xml_attributes for export."""
        # attempts isn't a basic attribute of Sequence
        assert_false(hasattr(SequenceDescriptor, 'attempts'))
        root = SequenceFactory.build(policy={'attempts': '1'})
        ProblemFactory.build(parent=root)
        seq = self.process_xml(root)
        # attempts isn't added to the constructed sequence, because
        # it's not in the InheritanceMixin
        assert_false(hasattr(seq, 'attempts'))
        # attempts is an unknown attribute, so we should include it
        # in xml_attributes so that it gets written out (despite the misleading
        # name)
        assert_in('attempts', seq.xml_attributes)
    def check_inheritable_attribute(self, attribute, value):
        """
        Assert that ``attribute`` (declared on InheritanceMixin, not on
        SequenceDescriptor itself) supplied via policy ends up as a real
        field equal to ``value`` on the imported sequence.
        """
        # `attribute` isn't a basic attribute of Sequence
        assert_false(hasattr(SequenceDescriptor, attribute))
        # `attribute` is added by InheritanceMixin
        assert_true(hasattr(InheritanceMixin, attribute))
        root = SequenceFactory.build(policy={attribute: str(value)})
        ProblemFactory.build(parent=root)
        # InheritanceMixin will be used when processing the XML
        assert_in(InheritanceMixin, root.xblock_mixins)
        seq = self.process_xml(root)
        assert_equals(seq.unmixed_class, SequenceDescriptor)
        assert_not_equals(type(seq), SequenceDescriptor)
        # `attribute` is added to the constructed sequence, because
        # it's in the InheritanceMixin
        assert_equals(value, getattr(seq, attribute))
        # `attribute` is a known attribute, so we shouldn't include it
        # in xml_attributes
        assert_not_in(attribute, seq.xml_attributes)
    def test_inheritable_attributes(self):
        # Exercise a few representative InheritanceMixin fields.
        self.check_inheritable_attribute('days_early_for_beta', 2)
        self.check_inheritable_attribute('max_attempts', 5)
        self.check_inheritable_attribute('visible_to_staff_only', True)
|
criteo-forks/graphite-web | refs/heads/master | webapp/graphite/functions/views.py | 4 | import json
from graphite.util import jsonResponse, HttpResponse, HttpError
from graphite.functions import SeriesFunctions, SeriesFunction, PieFunctions, PieFunction, functionInfo
class jsonInfinityEncoder(json.JSONEncoder):
    """JSON encoder that emits IEEE infinities as the literal ``1e9999``.

    Python's json module serializes float('inf') as the non-standard token
    ``Infinity``, which strict JSON parsers reject; ``1e9999`` overflows
    back to infinity when parsed, keeping the payload standards-compliant.
    """
    def encode(self, o):
        encoded = super(jsonInfinityEncoder, self).encode(o)
        # BUG FIX: the original only rewrote 'Infinity,', so an infinity at
        # the end of a list/object ('[Infinity]', '{"x": Infinity}') or a
        # bare infinity was emitted unfixed. Cover every position the token
        # can appear in. ('-Infinity' is handled too: the leading '-' is
        # preserved by the substring replacement.)
        # NOTE: like the original, this is a textual substitution and could
        # in principle touch a string literal containing these sequences.
        for terminator in (',', ']', '}'):
            encoded = encoded.replace('Infinity' + terminator,
                                      '1e9999' + terminator)
        if encoded.endswith('Infinity'):
            encoded = encoded[:-len('Infinity')] + '1e9999'
        return encoded

    def default(self, o):
        # Serialize arbitrary objects: prefer an explicit toJSON() hook,
        # fall back to the instance __dict__.
        if hasattr(o, 'toJSON'):
            return o.toJSON()
        return o.__dict__
@jsonResponse(encoder=jsonInfinityEncoder)
def functionList(request, queryParams):
    """Return a JSON description of all available render functions.

    Query parameters: ``type=pie`` lists pie-chart functions instead of
    series functions, ``group=<name>`` restricts output to one group, and
    ``grouped=1``/``grouped=true`` nests results under their group name.
    Only GET is allowed; other methods get a 405 response.
    """
    if request.method != 'GET':
        return HttpResponse(status=405)

    is_pie = queryParams.get('type') == 'pie'
    funcs = PieFunctions() if is_pie else SeriesFunctions()

    grouped = queryParams.get('grouped', '').lower() in ('1', 'true')
    group_filter = queryParams.get('group')

    result = {}
    for name, func in funcs.items():
        info = functionInfo(name, func)
        if group_filter is not None and group_filter != info['group']:
            continue
        if grouped:
            # Nest under the group name, creating the bucket on first use.
            result.setdefault(info['group'], {})[name] = info
        else:
            result[name] = info
    return result
@jsonResponse(encoder=jsonInfinityEncoder)
def functionDetails(request, queryParams, name):
    """Return the JSON description of the single render function ``name``.

    ``type=pie`` selects the pie-function registry. Unknown names raise a
    404 HttpError; non-GET methods get a 405 response.
    """
    if request.method != 'GET':
        return HttpResponse(status=405)

    lookup = PieFunction if queryParams.get('type') == 'pie' else SeriesFunction
    try:
        func = lookup(name)
    except KeyError:
        raise HttpError('Function not found: %s' % name, status=404)
    return functionInfo(name, func)
|
LiaoPan/scikit-learn | refs/heads/master | benchmarks/bench_plot_fastkmeans.py | 294 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
    """Benchmark full-batch KMeans against MiniBatchKMeans.

    Runs both estimators on random integer data for every combination of
    sample count and feature count, printing per-iteration progress.

    Parameters
    ----------
    samples_range : sequence of int
        Sample counts to benchmark.
    features_range : sequence of int
        Feature counts to benchmark.

    Returns
    -------
    collections.defaultdict of list
        Maps metric names ('kmeans_speed', 'kmeans_quality',
        'MiniBatchKMeans Speed', 'MiniBatchKMeans Quality') to lists with
        one entry per (n_samples, n_features) pair, in iteration order.
    """
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100  # mini-batch size for MiniBatchKMeans

    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            # FIX: np.random.random_integers is deprecated and removed in
            # modern NumPy; randint with an exclusive high bound draws the
            # same closed interval [-50, 50].
            data = nr.randint(-50, 51, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()
            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()
            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results
def compute_bench_2(chunks):
    """Benchmark MiniBatchKMeans over a range of batch ('chunk') sizes.

    Builds a fixed 8-cluster 2-D Gaussian-blob dataset and fits
    MiniBatchKMeans once per batch size, returning timings and inertiae.
    """
    results = defaultdict(lambda: [])
    points_per_cluster = 50000

    centers = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                        [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    # One Gaussian blob of points around each center, stacked row-wise.
    blobs = [centers[i] + 0.8 * np.random.randn(points_per_cluster, 2)
             for i in range(8)]
    X = np.vstack(blobs)

    n_iterations = len(chunks)
    for iteration, chunk in enumerate(chunks, start=1):
        print('==============================')
        print('Iteration %03d of %03d' % (iteration, n_iterations))
        print('==============================')
        print()

        print('Fast K-Means')
        # Timing deliberately includes estimator construction.
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)
        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.3fs" % mbkmeans.inertia_)
        print()

        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # Benchmark grids: sample/feature sizes for compute_bench and
    # mini-batch sizes for compute_bench_2.
    samples_range = np.linspace(50, 150, 5).astype(np.int)
    features_range = np.linspace(150, 50000, 5).astype(np.int)
    chunks = np.linspace(500, 10000, 15).astype(np.int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    # Shared z-axis limits so the speed and quality surfaces are comparable
    # across algorithms. NOTE(review): dict.iteritems() is Python-2-only.
    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label]])

    fig = plt.figure('scikit-learn K-Means benchmark results')
    for c, (label, timings) in zip('brcy',
                                   sorted(results.iteritems())):
        # Speed metrics go on the left 3-D subplot, quality on the right.
        if 'speed' in label:
            ax = fig.add_subplot(2, 2, 1, projection='3d')
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection='3d')
            ax.set_zlim3d(0.0, max_inertia * 1.1)
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')

    # Bottom row: 2-D plots of the batch-size benchmark.
    i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.iteritems())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel('Chunks')
        ax.set_ylabel(label)

    plt.show()
|
lunafeng/django | refs/heads/master | tests/view_tests/models.py | 281 | """
Regression tests for Django built-in views.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    """Simple author model used by the built-in view regression tests."""
    # Display name; also used as the string representation.
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Canonical URL for this author, keyed by primary key.
        return '/authors/%s/' % self.id
@python_2_unicode_compatible
class BaseArticle(models.Model):
    """
    An abstract article Model so that we can create article models with and
    without a get_absolute_url method (for create_update generic views tests).
    """
    title = models.CharField(max_length=100)
    slug = models.SlugField()
    author = models.ForeignKey(Author, models.CASCADE)
    class Meta:
        # Abstract: no table is created for this model; the concrete
        # subclasses below inherit its fields.
        abstract = True
    def __str__(self):
        return self.title
class Article(BaseArticle):
    """Concrete article that deliberately lacks a get_absolute_url method."""
    date_created = models.DateTimeField()
class UrlArticle(BaseArticle):
    """
    An Article class with a get_absolute_url defined.
    """
    date_created = models.DateTimeField()
    def get_absolute_url(self):
        # Canonical URL for this article, keyed by slug.
        return '/urlarticles/%s/' % self.slug
    # NOTE(review): non-standard function attribute; presumably inspected by
    # redirect/purge-related test code elsewhere — confirm against callers.
    get_absolute_url.purge = True
class DateArticle(BaseArticle):
    """
    An article Model with a DateField instead of DateTimeField,
    for testing #7602
    """
    # Date-only (no time component), unlike the other concrete articles.
    date_created = models.DateField()
|
ezequielpereira/Time-Line | refs/heads/master | libs64/wx/lib/masked/maskededit.py | 2 | #----------------------------------------------------------------------------
# Name: maskededit.py
# Authors: Will Sadkin, Jeff Childers
# Email: [email protected], [email protected]
# Created: 02/11/2003
# Copyright: (c) 2003 by Jeff Childers, Will Sadkin, 2003
# Portions: (c) 2002 by Will Sadkin, 2002-2007
# RCS-ID: $Id: maskededit.py 67477 2011-04-13 18:24:56Z RD $
# License: wxWidgets license
#----------------------------------------------------------------------------
# NOTE:
# MaskedEdit controls are based on a suggestion made on [wxPython-Users] by
# Jason Hihn, and borrows liberally from Will Sadkin's original masked edit
# control for time entry, TimeCtrl (which is now rewritten using this
# control!).
#
# MaskedEdit controls do not normally use validators, because they do
# careful manipulation of the cursor in the text window on each keystroke,
# and validation is cursor-position specific, so the control intercepts the
# key codes before the validator would fire. However, validators can be
# provided to do data transfer to the controls.
#
#----------------------------------------------------------------------------
#
# This file now contains the bulk of the logic behind all masked controls,
# the MaskedEditMixin class, the Field class, and the autoformat codes.
#
#----------------------------------------------------------------------------
#
# 03/30/2004 - Will Sadkin ([email protected])
#
# o Split out TextCtrl, ComboBox and IpAddrCtrl into their own files,
# o Reorganized code into masked package
#
# 12/09/2003 - Jeff Grimmett ([email protected])
#
# o Updated for wx namespace. No guarantees. This is one huge file.
#
# 12/13/2003 - Jeff Grimmett ([email protected])
#
# o Missed wx.DateTime stuff earlier.
#
# 12/20/2003 - Jeff Grimmett ([email protected])
#
# o MaskedEditMixin -> MaskedEditMixin
# o wxMaskedTextCtrl -> maskedTextCtrl
# o wxMaskedComboBoxSelectEvent -> MaskedComboBoxSelectEvent
# o wxMaskedComboBox -> MaskedComboBox
# o wxIpAddrCtrl -> IpAddrCtrl
# o wxTimeCtrl -> TimeCtrl
#
__doc__ = """\
contains MaskedEditMixin class that drives all the other masked controls.
====================
Masked Edit Overview
====================
masked.TextCtrl:
is a subclassed text control that can carefully control the user's input
based on a mask string you provide.
General usage example::
control = masked.TextCtrl( win, -1, '', mask = '(###) ###-####')
The example above will create a text control that allows only numbers to be
entered and then only in the positions indicated in the mask by the # sign.
masked.ComboBox:
is a similar subclass of wxComboBox that allows the same sort of masking,
but also can do auto-complete of values, and can require the value typed
to be in the list of choices to be colored appropriately.
masked.Ctrl:
is actually a factory function for several types of masked edit controls:
================= ==================================================
masked.TextCtrl standard masked edit text box
masked.ComboBox adds combobox capabilities
masked.IpAddrCtrl adds special semantics for IP address entry
masked.TimeCtrl special subclass handling lots of types as values
masked.NumCtrl special subclass handling numeric values
================= ==================================================
It works by looking for a *controlType* parameter in the keyword
arguments of the control, to determine what kind of instance to return.
If not specified as a keyword argument, the default control type returned
will be masked.TextCtrl.
Each of the above classes has its own set of arguments, but masked.Ctrl
provides a single "unified" interface for masked controls.
What follows is a description of how to configure the generic masked.TextCtrl
and masked.ComboBox; masked.NumCtrl and masked.TimeCtrl have their own demo
pages and interface descriptions.
=========================
Initialization Parameters
-------------------------
mask
Allowed mask characters and function:
========= ==========================================================
Character Function
========= ==========================================================
# Allow numeric only (0-9)
N Allow letters and numbers (0-9)
A Allow uppercase letters only
a Allow lowercase letters only
C Allow any letter, upper or lower
X Allow string.letters, string.punctuation, string.digits
& Allow string.punctuation only (doesn't include all unicode symbols)
\* Allow any visible character
| explicit field boundary (takes no space in the control; allows mix
of adjacent mask characters to be treated as separate fields,
eg: '&|###' means "field 0 = '&', field 1 = '###'", but there's
no fixed characters in between.
========= ==========================================================
These controls define these sets of characters using string.letters,
string.uppercase, etc. These sets are affected by the system locale
setting, so in order to have the masked controls accept characters
that are specific to your users' language, your application should
set the locale.
For example, to allow international characters to be used in the
above masks, you can place the following in your code as part of
your application's initialization code::
import locale
locale.setlocale(locale.LC_ALL, '')
The controls now also support (by popular demand) all "visible" characters,
by use of the * mask character, including unicode characters above
the standard ANSI keycode range.
Note: As string.punctuation doesn't typically include all unicode
symbols, you will have to use includechars to get some of these into
otherwise restricted positions in your control, such as those specified
with &.
Using these mask characters, a variety of template masks can be built. See
the demo for some other common examples include date+time, social security
number, etc. If any of these characters are needed as template rather
than mask characters, they can be escaped with \, ie. \N means "literal N".
(use \\ for literal backslash, as in: r'CCC\\NNN'.)
*Note:*
Masks containing only # characters and one optional decimal point
character are handled specially, as "numeric" controls. Such
controls have special handling for typing the '-' key, handling
the "decimal point" character as truncating the integer portion,
optionally allowing grouping characters and so forth.
There are several parameters and format codes that only make sense
when combined with such masks, eg. groupChar, decimalChar, and so
forth (see below). These allow you to construct reasonable
numeric entry controls.
*Note:*
Changing the mask for a control deletes any previous field classes
(and any associated validation or formatting constraints) for them.
useFixedWidthFont
By default, masked edit controls use a fixed width font, so that
the mask characters are fixed within the control, regardless of
subsequent modifications to the value. Set to False if having
the control font be the same as other controls is required. (This is
a control-level parameter.)
defaultEncoding
(Applies to unicode systems only) By default, the default unicode encoding
used is latin1, or iso-8859-1. If necessary, you can set this control-level
parameter to govern the codec used to decode your keyboard inputs.
(This is a control-level parameter.)
formatcodes
These other properties can be passed to the class when instantiating it:
Formatcodes are specified as a string of single character formatting
codes that modify behavior of the control::
_ Allow spaces
! Force upper
^ Force lower
R Right-align field(s)
r Right-insert in field(s) (implies R)
< Stay in field until explicit navigation out of it
> Allow insert/delete within partially filled fields (as
opposed to the default "overwrite" mode for fixed-width
masked edit controls.) This allows single-field controls
or each field within a multi-field control to optionally
behave more like standard text controls.
(See EMAIL or phone number autoformat examples.)
*Note: This also governs whether backspace/delete operations
shift contents of field to right of cursor, or just blank the
erased section.
Also, when combined with 'r', this indicates that the field
or control allows right insert anywhere within the current
non-empty value in the field. (Otherwise right-insert behavior
is only performed to when the entire right-insertable field is
selected or the cursor is at the right edge of the field.*
, Allow grouping character in integer fields of numeric controls
and auto-group/regroup digits (if the result fits) when leaving
such a field. (If specified, .SetValue() will attempt to
auto-group as well.)
',' is also the default grouping character. To change the
grouping character and/or decimal character, use the groupChar
and decimalChar parameters, respectively.
Note: typing the "decimal point" character in such fields will
clip the value to that left of the cursor for integer
fields of controls with "integer" or "floating point" masks.
If the ',' format code is specified, this will also cause the
resulting digits to be regrouped properly, using the current
grouping character.
- Prepend and reserve leading space for sign to mask and allow
signed values (negative #s shown in red by default.) Can be
used with argument useParensForNegatives (see below.)
0 integer fields get leading zeros
D Date[/time] field
T Time field
F Auto-Fit: the control calculates its size from
the length of the template mask
V validate entered chars against validRegex before allowing them
to be entered vs. being allowed by basic mask and then having
the resulting value just colored as invalid.
(See USSTATE autoformat demo for how this can be used.)
S select entire field when navigating to new field
fillChar
defaultValue
These controls have two options for the initial state of the control.
If a blank control with just the non-editable characters showing
is desired, simply leave the constructor variable fillChar as its
default (' '). If you want some other character there, simply
change the fillChar to that value. Note: changing the control's fillChar
will implicitly reset all of the fields' fillChars to this value.
If you need different default characters in each mask position,
you can specify a defaultValue parameter in the constructor, or
set them for each field individually.
This value must satisfy the non-editable characters of the mask,
but need not conform to the replaceable characters.
groupChar
decimalChar
These parameters govern what character is used to group numbers
and is used to indicate the decimal point for numeric format controls.
The default groupChar is ',', the default decimalChar is '.'
By changing these, you can customize the presentation of numbers
for your location.
Eg::
formatcodes = ',', groupChar='\'' allows 12'345.34
formatcodes = ',', groupChar='.', decimalChar=',' allows 12.345,34
(These are control-level parameters.)
shiftDecimalChar
The default "shiftDecimalChar" (used for "backwards-tabbing" until
shift-tab is fixed in wxPython) is '>' (for QWERTY keyboards.) for
other keyboards, you may want to customize this, eg '?' for shift ',' on
AZERTY keyboards, ':' or ';' for other European keyboards, etc.
(This is a control-level parameter.)
useParensForNegatives=False
This option can be used with signed numeric format controls to
indicate signs via () rather than '-'.
(This is a control-level parameter.)
autoSelect=False
This option can be used to have a field or the control try to
auto-complete on each keystroke if choices have been specified.
autoCompleteKeycodes=[]
By default, DownArrow, PageUp and PageDown will auto-complete a
partially entered field. Shift-DownArrow, Shift-UpArrow, PageUp
and PageDown will also auto-complete, but if the field already
contains a matched value, these keys will cycle through the list
of choices forward or backward as appropriate. Shift-Up and
Shift-Down also take you to the next/previous field after any
auto-complete action.
Additional auto-complete keys can be specified via this parameter.
Any keys so specified will act like PageDown.
(This is a control-level parameter.)
Validating User Input
=====================
There are a variety of initialization parameters that are used to validate
user input. These parameters can apply to the control as a whole, and/or
to individual fields:
===================== ==================================================================
excludeChars A string of characters to exclude even if otherwise allowed
includeChars A string of characters to allow even if otherwise disallowed
validRegex Use a regular expression to validate the contents of the text box
validRange Pass a range as list (low,high) to limit numeric fields/values
choices A list of strings that are allowed choices for the control.
choiceRequired value must be member of choices list
compareNoCase Perform case-insensitive matching when validating against list
*Note: for masked.ComboBox, this defaults to True.*
emptyInvalid Boolean indicating whether an empty value should be considered
invalid
validFunc A function to call of the form: bool = func(candidate_value)
which will return True if the candidate_value satisfies some
external criteria for the control in addition to the
other validation, or False if not. (This validation is
applied last in the chain of validations.)
validRequired Boolean indicating whether or not keys that are allowed by the
mask, but result in an invalid value are allowed to be entered
into the control. Setting this to True implies that a valid
default value is set for the control.
retainFieldValidation False by default; if True, this allows individual fields to
retain their own validation constraints independently of any
subsequent changes to the control's overall parameters.
(This is a control-level parameter.)
validator Validators are not normally needed for masked controls, because
of the nature of the validation and control of input. However,
you can supply one to provide data transfer routines for the
controls.
raiseOnInvalidPaste False by default; normally a bad paste simply is ignored with a bell;
if True, this will cause a ValueError exception to be thrown,
with the .value attribute of the exception containing the bad value.
stopFieldChangeIfInvalid
False by default; tries to prevent navigation out of a field if its
current value is invalid. Can be used to create a hybrid of validation
settings, allowing intermediate invalid values in a field without
sacrificing ability to limit values as with validRequired.
NOTE: It is possible to end up with an invalid value when using
this option if focus is switched to some other control via mousing.
To avoid this, consider deriving a class that defines _LostFocus()
function that returns the control to a valid value when the focus
shifts. (AFAICT, The change in focus is unpreventable.)
===================== =================================================================
Coloring Behavior
=================
The following parameters have been provided to allow you to change the default
coloring behavior of the control. These can be set at construction, or via
the .SetCtrlParameters() function. Pass a color as string e.g. 'Yellow':
======================== =======================================================================
emptyBackgroundColour Control Background color when identified as empty. Default=White
invalidBackgroundColour Control Background color when identified as Not valid. Default=Yellow
validBackgroundColour Control Background color when identified as Valid. Default=white
======================== =======================================================================
The following parameters control the default foreground color coloring behavior of the
control. Pass a color as string e.g. 'Yellow':
======================== ======================================================================
foregroundColour Control foreground color when value is not negative. Default=Black
signedForegroundColour Control foreground color when value is negative. Default=Red
======================== ======================================================================
Fields
======
Each part of the mask that allows user input is considered a field. The fields
are represented by their own class instances. You can specify field-specific
constraints by constructing or accessing the field instances for the control
and then specifying those constraints via parameters.
fields
This parameter allows you to specify Field instances containing
constraints for the individual fields of a control, eg: local
choice lists, validation rules, functions, regexps, etc.
It can be either an ordered list or a dictionary. If a list,
the fields will be applied as fields 0, 1, 2, etc.
If a dictionary, it should be keyed by field index.
the values should be a instances of maskededit.Field.
Any field not represented by the list or dictionary will be
implicitly created by the control.
Eg::
fields = [ Field(formatcodes='_r'), Field('choices=['a', 'b', 'c']) ]
Or::
fields = {
1: ( Field(formatcodes='_R', choices=['a', 'b', 'c']),
3: ( Field(choices=['01', '02', '03'], choiceRequired=True)
}
The following parameters are available for individual fields, with the
same semantics as for the whole control but applied to the field in question:
============== =============================================================================
fillChar if set for a field, it will override the control's fillChar for that field
groupChar if set for a field, it will override the control's default
defaultValue sets field-specific default value; overrides any default from control
compareNoCase overrides control's settings
emptyInvalid determines whether field is required to be filled at all times
validRequired if set, requires field to contain valid value
============== =============================================================================
If any of the above parameters are subsequently specified for the control as a
whole, that new value will be propagated to each field, unless the
retainFieldValidation control-level parameter is set.
============== ==============================
formatcodes Augments control's settings
excludeChars ' ' '
includeChars ' ' '
validRegex ' ' '
validRange ' ' '
choices ' ' '
choiceRequired ' ' '
validFunc ' ' '
============== ==============================
Control Class Functions
=======================
.GetPlainValue(value=None)
Returns the value specified (or the control's text value
not specified) without the formatting text.
In the example above, might return phone no='3522640075',
whereas control.GetValue() would return '(352) 264-0075'
.ClearValue()
Returns the control's value to its default, and places the
cursor at the beginning of the control.
.SetValue()
Does "smart replacement" of passed value into the control, as does
the .Paste() method. As with other text entry controls, the
.SetValue() text replacement begins at left-edge of the control,
with missing mask characters inserted as appropriate.
.SetValue will also adjust integer, float or date mask entry values,
adding commas, auto-completing years, etc. as appropriate.
For "right-aligned" numeric controls, it will also now automatically
right-adjust any value whose length is less than the width of the
control before attempting to set the value.
If a value does not follow the format of the control's mask, or will
not fit into the control, a ValueError exception will be raised.
Eg::
mask = '(###) ###-####'
.SetValue('1234567890') => '(123) 456-7890'
.SetValue('(123)4567890') => '(123) 456-7890'
.SetValue('(123)456-7890') => '(123) 456-7890'
.SetValue('123/4567-890') => illegal paste; ValueError
mask = '#{6}.#{2}', formatcodes = '_,-',
.SetValue('111') => ' 111 . '
.SetValue(' %9.2f' % -111.12345 ) => ' -111.12'
.SetValue(' %9.2f' % 1234.00 ) => ' 1,234.00'
.SetValue(' %9.2f' % -1234567.12345 ) => insufficient room; ValueError
mask = '#{6}.#{2}', formatcodes = '_,-R' # will right-adjust value for right-aligned control
.SetValue('111') => padded value misalignment ValueError: " 111" will not fit
.SetValue('%.2f' % 111 ) => ' 111.00'
.SetValue('%.2f' % -111.12345 ) => ' -111.12'
.IsValid(value=None)
Returns True if the value specified (or the value of the control
if not specified) passes validation tests
.IsEmpty(value=None)
Returns True if the value specified (or the value of the control
if not specified) is equal to an "empty value," ie. all
editable characters == the fillChar for their respective fields.
.IsDefault(value=None)
Returns True if the value specified (or the value of the control
if not specified) is equal to the initial value of the control.
.Refresh()
Recolors the control as appropriate to its current settings.
.SetCtrlParameters(\*\*kwargs)
This function allows you to set up and/or change the control parameters
after construction; it takes a list of key/value pairs as arguments,
where the keys can be any of the mask-specific parameters in the constructor.
Eg::
ctl = masked.TextCtrl( self, -1 )
ctl.SetCtrlParameters( mask='###-####',
defaultValue='555-1212',
formatcodes='F')
.GetCtrlParameter(parametername)
This function allows you to retrieve the current value of a parameter
from the control.
*Note:* Each of the control parameters can also be set using its
own Set and Get function. These functions follow a regular form:
All of the parameter names start with lower case; for their
corresponding Set/Get function, the parameter name is capitalized.
Eg::
ctl.SetMask('###-####')
ctl.SetDefaultValue('555-1212')
ctl.GetChoiceRequired()
ctl.GetFormatcodes()
*Note:* After any change in parameters, the choices for the
control are reevaluated to ensure that they are still legal. If you
have large choice lists, it is therefore more efficient to set parameters
before setting the choices available.
.SetFieldParameters(field_index, \*\*kwargs)
This function allows you to specify or change individual field
parameters after construction. (Indices are 0-based.)
.GetFieldParameter(field_index, parametername)
Allows the retrieval of field parameters after construction
The control detects certain common constructions. In order to use the signed feature
(negative numbers and coloring), the mask has to be all numbers with optionally one
decimal point. Without a decimal (e.g. '######'), the control will treat it as an integer
value. With a decimal (e.g. '###.##'), the control will act as a floating point control
(i.e. press decimal to 'tab' to the decimal position). Pressing decimal in the
integer control truncates the value. However, for a true numeric control,
masked.NumCtrl provides all this, and true numeric input/output support as well.
Check your controls by calling each control's .IsValid() function and the
.IsEmpty() function to determine which controls have been a) filled in and
b) filled in properly.
Regular expression validations can be used flexibly and creatively.
Take a look at the demo; the zip-code validation succeeds as long as the
first five numerals are entered. the last four are optional, but if
any are entered, there must be 4 to be valid.
masked.Ctrl Configuration
=========================
masked.Ctrl works by looking for a special *controlType*
parameter in the variable arguments of the control, to determine
what kind of instance to return.
controlType can be one of::
controlTypes.TEXT
controlTypes.COMBO
controlTypes.IPADDR
controlTypes.TIME
controlTypes.NUMBER
These constants are also available individually, ie, you can
use either of the following::
from wxPython.wx.lib.masked import MaskedCtrl, controlTypes
from wxPython.wx.lib.masked import MaskedCtrl, COMBO, TEXT, NUMBER, IPADDR
If not specified as a keyword argument, the default controlType is
controlTypes.TEXT.
"""
"""
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
DEVELOPER COMMENTS:
Naming Conventions
------------------
All methods of the Mixin that are not meant to be exposed to the external
interface are prefaced with '_'. Those functions that are primarily
intended to be internal subroutines subsequently start with a lower-case
letter; those that are primarily intended to be used and/or overridden
by derived subclasses start with a capital letter.
The following methods must be used and/or defined when deriving a control
from MaskedEditMixin. NOTE: if deriving from a *masked edit* control
(eg. class IpAddrCtrl(masked.TextCtrl) ), then this is NOT necessary,
as it's already been done for you in the base class.
._SetInitialValue()
This function must be called after the associated base
control has been initialized in the subclass __init__
function. It sets the initial value of the control,
either to the value specified if non-empty, the
default value if specified, or the "template" for
the empty control as necessary. It will also set/reset
the font if necessary and apply formatting to the
control at this time.
._GetSelection()
REQUIRED
Each class derived from MaskedEditMixin must define
the function for getting the start and end of the
current text selection. The reason for this is
that not all controls have the same function name for
doing this; eg. wx.TextCtrl uses .GetSelection(),
whereas we had to write a .GetMark() function for
wxComboBox, because .GetSelection() for the control
gets the currently selected list item from the combo
box, and the control doesn't (yet) natively provide
a means of determining the text selection.
._SetSelection()
REQUIRED
Similarly to _GetSelection, each class derived from
MaskedEditMixin must define the function for setting
the start and end of the current text selection.
(eg. .SetSelection() for masked.TextCtrl, and .SetMark() for
masked.ComboBox.
._GetInsertionPoint()
._SetInsertionPoint()
REQUIRED
For consistency, and because the mixin shouldn't rely
on fixed names for any manipulations it does of any of
the base controls, we require each class derived from
MaskedEditMixin to define these functions as well.
._GetValue()
._SetValue() REQUIRED
Each class derived from MaskedEditMixin must define
the functions used to get and set the raw value of the
control.
This is necessary so that recursion doesn't take place
when setting the value, and so that the mixin can
call the appropriate function after doing all its
validation and manipulation without knowing what kind
of base control it was mixed in with. To handle undo
functionality, the ._SetValue() must record the current
selection prior to setting the value.
.Cut()
.Paste()
.Undo()
.SetValue() REQUIRED
Each class derived from MaskedEditMixin must redefine
these functions to call the _Cut(), _Paste(), _Undo()
and _SetValue() methods, respectively for the control,
so as to prevent programmatic corruption of the control's
value. This must be done in each derivation, as the
mixin cannot itself override a member of a sibling class.
._Refresh() REQUIRED
Each class derived from MaskedEditMixin must define
the function used to refresh the base control.
.Refresh() REQUIRED
Each class derived from MaskedEditMixin must redefine
this function so that it checks the validity of the
control (via self._CheckValid) and then refreshes
control using the base class method.
._IsEditable() REQUIRED
Each class derived from MaskedEditMixin must define
the function used to determine if the base control is
editable or not. (For masked.ComboBox, this has to
be done with code, rather than specifying the proper
function in the base control, as there isn't one...)
._CalcSize() REQUIRED
Each class derived from MaskedEditMixin must define
the function used to determine how wide the control
should be given the mask. (The mixin function
._calcSize() provides a baseline estimate.)
Event Handling
--------------
Event handlers are "chained", and MaskedEditMixin usually
swallows most of the events it sees, thereby preventing any other
handlers from firing in the chain. It is therefore required that
each class derivation using the mixin to have an option to hook up
the event handlers itself or forego this operation and let a
subclass of the masked control do so. For this reason, each
subclass should probably include the following code:
if setupEventHandling:
## Setup event handlers
EVT_SET_FOCUS( self, self._OnFocus ) ## defeat automatic full selection
EVT_KILL_FOCUS( self, self._OnKillFocus ) ## run internal validator
EVT_LEFT_DCLICK(self, self._OnDoubleClick) ## select field under cursor on dclick
EVT_RIGHT_UP(self, self._OnContextMenu ) ## bring up an appropriate context menu
EVT_KEY_DOWN( self, self._OnKeyDown ) ## capture control events not normally seen, eg ctrl-tab.
EVT_CHAR( self, self._OnChar ) ## handle each keypress
EVT_TEXT( self, self.GetId(), self._OnTextChange ) ## color control appropriately & keep
## track of previous value for undo
where setupEventHandling is an argument to its constructor.
These 5 handlers must be "wired up" for the masked edit
controls to provide default behavior. (The setupEventHandling
is an argument to masked.TextCtrl and masked.ComboBox, so
that controls derived from *them* may replace one of these
handlers if they so choose.)
If your derived control wants to preprocess events before
taking action, it should then set up the event handling itself,
so it can be first in the event handler chain.
The following routines are available to facilitate changing
the default behavior of masked edit controls:
._SetKeycodeHandler(keycode, func)
._SetKeyHandler(char, func)
Use to replace default handling for any given keycode.
func should take the key event as argument and return
False if no further action is required to handle the
key. Eg:
self._SetKeycodeHandler(WXK_UP, self.IncrementValue)
self._SetKeyHandler('-', self._OnChangeSign)
(Setting a func of None removes any keyhandler for the given key.)
"Navigation" keys are assumed to change the cursor position, and
therefore don't cause automatic motion of the cursor as insertable
characters do.
._AddNavKeycode(keycode, handler=None)
._AddNavKey(char, handler=None)
Allows controls to specify other keys (and optional handlers)
to be treated as navigational characters. (eg. '.' in IpAddrCtrl)
._GetNavKeycodes() Returns the current list of navigational keycodes.
._SetNavKeycodes(key_func_tuples)
Allows replacement of the current list of keycode
processed as navigation keys, and bind associated
optional keyhandlers. argument is a list of key/handler
tuples. Passing a value of None for the handler in a
given tuple indicates that default processing for the key
is desired.
._FindField(pos) Returns the Field object associated with this position
in the control.
._FindFieldExtent(pos, getslice=False, value=None)
Returns edit_start, edit_end of the field corresponding
to the specified position within the control, and
optionally also returns the current contents of that field.
If value is specified, it will retrieve the corresponding
slice from that value, rather than the current value of the
control.
._AdjustField(pos)
This is the function that gets called for a given position
whenever the cursor is adjusted to leave a given field.
By default, it adjusts the year in date fields if mask is a date,
It can be overridden by a derived class to
adjust the value of the control at that time.
(eg. IpAddrCtrl reformats the address in this way.)
._Change() Called by internal EVT_TEXT handler. Return False to force
skip of the normal class change event.
._Keypress(key) Called by internal EVT_CHAR handler. Return False to force
skip of the normal class keypress event.
._LostFocus() Called by internal EVT_KILL_FOCUS handler
._OnKeyDown(event)
This is the default EVT_KEY_DOWN routine; it just checks for
"navigation keys", and if event.ControlDown(), it fires the
mixin's _OnChar() routine, as such events are not always seen
by the "cooked" EVT_CHAR routine.
._OnChar(event) This is the main EVT_CHAR handler for the
MaskedEditMixin.
The following routines are used to handle standard actions
for control keys:
_OnArrow(event) used for arrow navigation events
_OnCtrl_A(event) 'select all'
_OnCtrl_C(event) 'copy' (uses base control function, as copy is non-destructive)
_OnCtrl_S(event) 'save' (does nothing)
_OnCtrl_V(event) 'paste' - calls _Paste() method, to do smart paste
_OnCtrl_X(event) 'cut' - calls _Cut() method, to "erase" selection
_OnCtrl_Z(event) 'undo' - resets value to previous value (if any)
_OnChangeField(event) primarily used for tab events, but can be
used for other keys (eg. '.' in IpAddrCtrl)
_OnErase(event) used for backspace and delete
_OnHome(event)
_OnEnd(event)
The following routine provides a hook back to any class derivations, so that
they can react to parameter changes before any value is set/reset as a result of
those changes. (eg. masked.ComboBox needs to detect when the choices list is
modified, either implicitly or explicitly, so it can reset the base control
to have the appropriate choice list *before* the initial value is reset to match.)
_OnCtrlParametersChanged()
Accessor Functions
------------------
For convenience, each class derived from MaskedEditMixin should
define an accessors mixin, so that it exposes only those parameters
that make sense for the derivation. This is done with an intermediate
level of inheritance, ie:
class BaseMaskedTextCtrl( TextCtrl, MaskedEditMixin ):
class TextCtrl( BaseMaskedTextCtrl, MaskedEditAccessorsMixin ):
class ComboBox( BaseMaskedComboBox, MaskedEditAccessorsMixin ):
class NumCtrl( BaseMaskedTextCtrl, MaskedNumCtrlAccessorsMixin ):
class IpAddrCtrl( BaseMaskedTextCtrl, IpAddrCtrlAccessorsMixin ):
class TimeCtrl( BaseMaskedTextCtrl, TimeCtrlAccessorsMixin ):
etc.
Each accessors mixin defines Get/Set functions for the base class parameters
that are appropriate for that derivation.
This allows the base classes to be "more generic," exposing the widest
set of options, while not requiring derived classes to be so general.
"""
import copy
import difflib
import re
import string
import types
import wx
# jmg 12/9/03 - when we cut ties with Py 2.2 and earlier, this would
# be a good place to implement the 2.3 logger class
from wx.tools.dbg import Logger
##dbg = Logger()
##dbg(enable=1)
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
## Constants for identifying control keys and classes of keys:
## Control-key codes not already defined by wx itself: Ctrl-<letter>
## arrives as the ASCII control code 1..26, i.e. ord(letter) - ord('@').
WXK_CTRL_A = ord('A') - ord('@')    ## == 1
WXK_CTRL_C = ord('C') - ord('@')    ## == 3
WXK_CTRL_S = ord('S') - ord('@')    ## == 19
WXK_CTRL_V = ord('V') - ord('@')    ## == 22
WXK_CTRL_X = ord('X') - ord('@')    ## == 24
WXK_CTRL_Z = ord('Z') - ord('@')    ## == 26
## Keycodes the mixin treats as "navigation" keys: they reposition the
## cursor rather than inserting a character, so they bypass mask-driven
## character handling.
nav = (
    wx.WXK_BACK, wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_UP, wx.WXK_DOWN, wx.WXK_TAB,
    wx.WXK_HOME, wx.WXK_END, wx.WXK_RETURN, wx.WXK_PRIOR, wx.WXK_NEXT,
    wx.WXK_NUMPAD_LEFT, wx.WXK_NUMPAD_RIGHT, wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_DOWN,
    wx.WXK_NUMPAD_HOME, wx.WXK_NUMPAD_END, wx.WXK_NUMPAD_ENTER, wx.WXK_NUMPAD_PRIOR, wx.WXK_NUMPAD_NEXT
    )
## Keycodes treated as editing/"control" keys (erase, clipboard, undo):
control = (
    wx.WXK_BACK, wx.WXK_DELETE, wx.WXK_INSERT,
    wx.WXK_NUMPAD_DELETE, wx.WXK_NUMPAD_INSERT,
    WXK_CTRL_A, WXK_CTRL_C, WXK_CTRL_S, WXK_CTRL_V,
    WXK_CTRL_X, WXK_CTRL_Z
    )
# Because unicode can go over the ansi character range, we need to explicitly test
# for all non-visible keystrokes, rather than just assuming a particular range for
# visible characters:
## NOTE: `range(32) + list(...)` relies on Python 2's range() returning a
## list; under Python 3 this would need to be list(range(32)) + ...
wx_control_keycodes = range(32) + list(nav) + list(control) + [
    wx.WXK_START, wx.WXK_LBUTTON, wx.WXK_RBUTTON, wx.WXK_CANCEL, wx.WXK_MBUTTON,
    wx.WXK_CLEAR, wx.WXK_SHIFT, wx.WXK_CONTROL, wx.WXK_MENU, wx.WXK_PAUSE,
    wx.WXK_CAPITAL, wx.WXK_SELECT, wx.WXK_PRINT, wx.WXK_EXECUTE, wx.WXK_SNAPSHOT,
    wx.WXK_HELP, wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
    wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7, wx.WXK_NUMPAD8,
    wx.WXK_NUMPAD9, wx.WXK_MULTIPLY, wx.WXK_ADD, wx.WXK_SEPARATOR, wx.WXK_SUBTRACT,
    wx.WXK_DECIMAL, wx.WXK_DIVIDE, wx.WXK_F1, wx.WXK_F2, wx.WXK_F3, wx.WXK_F4,
    wx.WXK_F5, wx.WXK_F6, wx.WXK_F7, wx.WXK_F8, wx.WXK_F9, wx.WXK_F10, wx.WXK_F11,
    wx.WXK_F12, wx.WXK_F13, wx.WXK_F14, wx.WXK_F15, wx.WXK_F16, wx.WXK_F17,
    wx.WXK_F18, wx.WXK_F19, wx.WXK_F20, wx.WXK_F21, wx.WXK_F22, wx.WXK_F23,
    wx.WXK_F24, wx.WXK_NUMLOCK, wx.WXK_SCROLL, wx.WXK_PAGEUP, wx.WXK_PAGEDOWN,
    wx.WXK_NUMPAD_SPACE, wx.WXK_NUMPAD_TAB, wx.WXK_NUMPAD_ENTER, wx.WXK_NUMPAD_F1,
    wx.WXK_NUMPAD_F2, wx.WXK_NUMPAD_F3, wx.WXK_NUMPAD_F4, wx.WXK_NUMPAD_HOME,
    wx.WXK_NUMPAD_LEFT, wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_RIGHT, wx.WXK_NUMPAD_DOWN,
    wx.WXK_NUMPAD_PRIOR, wx.WXK_NUMPAD_PAGEUP, wx.WXK_NUMPAD_NEXT, wx.WXK_NUMPAD_PAGEDOWN,
    wx.WXK_NUMPAD_END, wx.WXK_NUMPAD_BEGIN, wx.WXK_NUMPAD_INSERT, wx.WXK_NUMPAD_DELETE,
    wx.WXK_NUMPAD_EQUAL, wx.WXK_NUMPAD_MULTIPLY, wx.WXK_NUMPAD_ADD, wx.WXK_NUMPAD_SEPARATOR,
    wx.WXK_NUMPAD_SUBTRACT, wx.WXK_NUMPAD_DECIMAL, wx.WXK_NUMPAD_DIVIDE, wx.WXK_WINDOWS_LEFT,
    wx.WXK_WINDOWS_RIGHT, wx.WXK_WINDOWS_MENU, wx.WXK_COMMAND,
    # Hardware-specific buttons
    wx.WXK_SPECIAL1, wx.WXK_SPECIAL2, wx.WXK_SPECIAL3, wx.WXK_SPECIAL4, wx.WXK_SPECIAL5,
    wx.WXK_SPECIAL6, wx.WXK_SPECIAL7, wx.WXK_SPECIAL8, wx.WXK_SPECIAL9, wx.WXK_SPECIAL10,
    wx.WXK_SPECIAL11, wx.WXK_SPECIAL12, wx.WXK_SPECIAL13, wx.WXK_SPECIAL14, wx.WXK_SPECIAL15,
    wx.WXK_SPECIAL16, wx.WXK_SPECIAL17, wx.WXK_SPECIAL18, wx.WXK_SPECIAL19, wx.WXK_SPECIAL20
    ]
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
## Constants for masking. This is where mask characters
## are defined.
## maskchars used to identify valid mask characters from all others
## # - allow numeric 0-9 only
## A - allow uppercase only. Combine with forceupper to force lowercase to upper
## a - allow lowercase only. Combine with forcelower to force upper to lowercase
## C - allow any letter, upper or lower
## X - allow string.letters, string.punctuation, string.digits
## & - allow string.punctuation only (doesn't include all unicode symbols)
## * - allow any visible character
## Note: locale settings affect what "uppercase", lowercase, etc comprise.
## Note: '|' is not a maskchar, in that it is a mask processing directive, and so
## does not appear here.
##
## Characters that may appear in a mask (see the module docstring for the
## meaning of each mask character):
maskchars = ("#","A","a","X","C","N",'*','&')

## Every 8-bit "ANSI" character from space (32) through 255.
## (Fixed: built with a portable join instead of Python-2-only xrange
## and a quadratic `+=` loop.)
ansichars = ''.join(map(chr, range(32, 256)))

## Regular-expression alternations used to build the date/time
## validators below.  Each numeric alternation is generated as
## zero-padded two-digit values, producing exactly the same strings as
## the hand-written '(01|02|...)' literals they replace (which were
## error-prone to maintain by hand).
months = '(%s)' % '|'.join(['%02d' % m for m in range(1, 13)])

_month_abbrevs = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
## Month-name alternation accepts Title-, UPPER- and lower-case forms:
charmonths = '(%s)' % '|'.join(_month_abbrevs
                               + [m.upper() for m in _month_abbrevs]
                               + [m.lower() for m in _month_abbrevs])
## Map lower-cased month abbreviation -> month number (1-12):
charmonths_dict = dict(zip([m.lower() for m in _month_abbrevs],
                           range(1, 13)))

days = '(%s)' % '|'.join(['%02d' % d for d in range(1, 32)])
## 12-hour clock: '0d', ' d' (space-padded) or 10/11/12:
hours = '(0\d| \d|1[012])'
milhours = '(%s)' % '|'.join(['%02d' % h for h in range(24)])
minutes = '(%s)' % '|'.join(['%02d' % m for m in range(60)])
seconds = minutes
## Characters excluded from the AM/PM field of time masks so that only
## 'A' and 'P' can be typed: every other uppercase ASCII letter plus a
## run of 8-bit uppercase letters.
## NOTE(review): the \x.. bytes appear to be Windows-1252/Latin-1
## uppercase accented letters -- confirm against the intended charset.
am_pm_exclude = 'BCDEFGHIJKLMNOQRSTUVWXYZ\x8a\x8c\x8e\x9f\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde'

## Two-letter codes for US states and territories (plus DC), used as the
## choice list for the USSTATE autoformat below:
states = "AL,AK,AZ,AR,CA,CO,CT,DE,DC,FL,GA,GU,HI,ID,IL,IN,IA,KS,KY,LA,MA,ME,MD,MI,MN,MS,MO,MT,NE,NV,NH,NJ,NM,NY,NC,ND,OH,OK,OR,PA,PR,RI,SC,SD,TN,TX,UT,VA,VT,VI,WA,WV,WI,WY".split(',')

## Full state names used as the choice list (and validation alternation)
## for the USSTATENAME autoformat.
## Fixed: 'North Dakokta' -> 'North Dakota'; the misspelling meant the
## correctly-spelled name was not an accepted choice.
state_names = ['Alabama','Alaska','Arizona','Arkansas',
               'California','Colorado','Connecticut',
               'Delaware','District of Columbia',
               'Florida','Georgia','Hawaii',
               'Idaho','Illinois','Indiana','Iowa',
               'Kansas','Kentucky','Louisiana',
               'Maine','Maryland','Massachusetts','Michigan',
               'Minnesota','Mississippi','Missouri','Montana',
               'Nebraska','Nevada','New Hampshire','New Jersey',
               'New Mexico','New York','North Carolina','North Dakota',
               'Ohio','Oklahoma','Oregon',
               'Pennsylvania','Puerto Rico','Rhode Island',
               'South Carolina','South Dakota',
               'Tennessee','Texas','Utah',
               'Vermont','Virginia',
               'Washington','West Virginia',
               'Wisconsin','Wyoming']
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
## The following dictionary defines the current set of autoformats:
masktags = {
    ## Fixes relative to the original table:
    ##  * literal '.' separators in validRegex entries are escaped ('\.')
    ##    so they match only a real dot, not any character;
    ##  * AGE's character class '[0|1|2]' (which also matched '|') is now
    ##    '[012]';
    ##  * "w/hypens" description typos corrected to "w/hyphens".

    ## ----- US phone numbers -----
    "USPHONEFULLEXT": {
        'mask': "(###) ###-#### x:###",
        'formatcodes': 'F^->',
        'validRegex': "^\(\d{3}\) \d{3}-\d{4}",
        'description': "Phone Number w/opt. ext"
    },
    "USPHONETIGHTEXT": {
        'mask': "###-###-#### x:###",
        'formatcodes': 'F^->',
        'validRegex': "^\d{3}-\d{3}-\d{4}",
        'description': "Phone Number\n (w/hyphens and opt. ext)"
    },
    "USPHONEFULL": {
        'mask': "(###) ###-####",
        'formatcodes': 'F^->',
        'validRegex': "^\(\d{3}\) \d{3}-\d{4}",
        'description': "Phone Number only"
    },
    "USPHONETIGHT": {
        'mask': "###-###-####",
        'formatcodes': 'F^->',
        'validRegex': "^\d{3}-\d{3}-\d{4}",
        'description': "Phone Number\n(w/hyphens)"
    },

    ## ----- US states -----
    "USSTATE": {
        'mask': "AA",
        'formatcodes': 'F!V',
        'validRegex': "([ACDFGHIKLMNOPRSTUVW] |%s)" % string.join(states,'|'),
        'choices': states,
        'choiceRequired': True,
        'description': "US State Code"
    },
    "USSTATENAME": {
        'mask': "ACCCCCCCCCCCCCCCCCCC",
        'formatcodes': 'F_',
        'validRegex': "([ACDFGHIKLMNOPRSTUVW] |%s)" % string.join(state_names,'|'),
        'choices': state_names,
        'choiceRequired': True,
        'description': "US State Name"
    },

    ## ----- US dates and times -----
    "USDATETIMEMMDDYYYY/HHMMSS": {
        'mask': "##/##/#### ##:##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + months + '/' + days + '/' + '\d{4} ' + hours + ':' + minutes + ':' + seconds + ' (A|P)M',
        'description': "US Date + Time"
    },
    "USDATETIMEMMDDYYYY-HHMMSS": {
        'mask': "##-##-#### ##:##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + months + '-' + days + '-' + '\d{4} ' + hours + ':' + minutes + ':' + seconds + ' (A|P)M',
        'description': "US Date + Time\n(w/hyphens)"
    },
    "USDATE24HRTIMEMMDDYYYY/HHMMSS": {
        'mask': "##/##/#### ##:##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + months + '/' + days + '/' + '\d{4} ' + milhours + ':' + minutes + ':' + seconds,
        'description': "US Date + 24Hr (Military) Time"
    },
    "USDATE24HRTIMEMMDDYYYY-HHMMSS": {
        'mask': "##-##-#### ##:##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + months + '-' + days + '-' + '\d{4} ' + milhours + ':' + minutes + ':' + seconds,
        'description': "US Date + 24Hr Time\n(w/hyphens)"
    },
    "USDATETIMEMMDDYYYY/HHMM": {
        'mask': "##/##/#### ##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + months + '/' + days + '/' + '\d{4} ' + hours + ':' + minutes + ' (A|P)M',
        'description': "US Date + Time\n(without seconds)"
    },
    "USDATE24HRTIMEMMDDYYYY/HHMM": {
        'mask': "##/##/#### ##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + months + '/' + days + '/' + '\d{4} ' + milhours + ':' + minutes,
        'description': "US Date + 24Hr Time\n(without seconds)"
    },
    "USDATETIMEMMDDYYYY-HHMM": {
        'mask': "##-##-#### ##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + months + '-' + days + '-' + '\d{4} ' + hours + ':' + minutes + ' (A|P)M',
        'description': "US Date + Time\n(w/hyphens and w/o secs)"
    },
    "USDATE24HRTIMEMMDDYYYY-HHMM": {
        'mask': "##-##-#### ##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + months + '-' + days + '-' + '\d{4} ' + milhours + ':' + minutes,
        'description': "US Date + 24Hr Time\n(w/hyphens and w/o seconds)"
    },
    "USDATEMMDDYYYY/": {
        'mask': "##/##/####",
        'formatcodes': 'DF',
        'validRegex': '^' + months + '/' + days + '/' + '\d{4}',
        'description': "US Date\n(MMDDYYYY)"
    },
    "USDATEMMDDYY/": {
        'mask': "##/##/##",
        'formatcodes': 'DF',
        'validRegex': '^' + months + '/' + days + '/\d\d',
        'description': "US Date\n(MMDDYY)"
    },
    "USDATEMMDDYYYY-": {
        'mask': "##-##-####",
        'formatcodes': 'DF',
        'validRegex': '^' + months + '-' + days + '-' +'\d{4}',
        'description': "MM-DD-YYYY"
    },

    ## ----- European/ISO dates and times -----
    "EUDATEYYYYMMDD/": {
        'mask': "####/##/##",
        'formatcodes': 'DF',
        'validRegex': '^' + '\d{4}'+ '/' + months + '/' + days,
        'description': "YYYY/MM/DD"
    },
    "EUDATEYYYYMMDD.": {
        'mask': "####.##.##",
        'formatcodes': 'DF',
        'validRegex': '^' + '\d{4}'+ '\.' + months + '\.' + days,
        'description': "YYYY.MM.DD"
    },
    "EUDATEDDMMYYYY/": {
        'mask': "##/##/####",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '/' + months + '/' + '\d{4}',
        'description': "DD/MM/YYYY"
    },
    "EUDATEDDMMYYYY.": {
        'mask': "##.##.####",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '\.' + months + '\.' + '\d{4}',
        'description': "DD.MM.YYYY"
    },
    "EUDATEDDMMMYYYY.": {
        'mask': "##.CCC.####",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '\.' + charmonths + '\.' + '\d{4}',
        'description': "DD.Month.YYYY"
    },
    "EUDATEDDMMMYYYY/": {
        'mask': "##/CCC/####",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '/' + charmonths + '/' + '\d{4}',
        'description': "DD/Month/YYYY"
    },
    "EUDATETIMEYYYYMMDD/HHMMSS": {
        'mask': "####/##/## ##:##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + '\d{4}'+ '/' + months + '/' + days + ' ' + hours + ':' + minutes + ':' + seconds + ' (A|P)M',
        'description': "YYYY/MM/DD HH:MM:SS"
    },
    "EUDATETIMEYYYYMMDD.HHMMSS": {
        'mask': "####.##.## ##:##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + '\d{4}'+ '\.' + months + '\.' + days + ' ' + hours + ':' + minutes + ':' + seconds + ' (A|P)M',
        'description': "YYYY.MM.DD HH:MM:SS"
    },
    "EUDATETIMEDDMMYYYY/HHMMSS": {
        'mask': "##/##/#### ##:##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + days + '/' + months + '/' + '\d{4} ' + hours + ':' + minutes + ':' + seconds + ' (A|P)M',
        'description': "DD/MM/YYYY HH:MM:SS"
    },
    "EUDATETIMEDDMMYYYY.HHMMSS": {
        'mask': "##.##.#### ##:##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + days + '\.' + months + '\.' + '\d{4} ' + hours + ':' + minutes + ':' + seconds + ' (A|P)M',
        'description': "DD.MM.YYYY HH:MM:SS"
    },
    "EUDATETIMEYYYYMMDD/HHMM": {
        'mask': "####/##/## ##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + '\d{4}'+ '/' + months + '/' + days + ' ' + hours + ':' + minutes + ' (A|P)M',
        'description': "YYYY/MM/DD HH:MM"
    },
    "EUDATETIMEYYYYMMDD.HHMM": {
        'mask': "####.##.## ##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + '\d{4}'+ '\.' + months + '\.' + days + ' ' + hours + ':' + minutes + ' (A|P)M',
        'description': "YYYY.MM.DD HH:MM"
    },
    "EUDATETIMEDDMMYYYY/HHMM": {
        'mask': "##/##/#### ##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + days + '/' + months + '/' + '\d{4} ' + hours + ':' + minutes + ' (A|P)M',
        'description': "DD/MM/YYYY HH:MM"
    },
    "EUDATETIMEDDMMYYYY.HHMM": {
        'mask': "##.##.#### ##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'DF!',
        'validRegex': '^' + days + '\.' + months + '\.' + '\d{4} ' + hours + ':' + minutes + ' (A|P)M',
        'description': "DD.MM.YYYY HH:MM"
    },
    "EUDATE24HRTIMEYYYYMMDD/HHMMSS": {
        'mask': "####/##/## ##:##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + '\d{4}'+ '/' + months + '/' + days + ' ' + milhours + ':' + minutes + ':' + seconds,
        'description': "YYYY/MM/DD 24Hr Time"
    },
    "EUDATE24HRTIMEYYYYMMDD.HHMMSS": {
        'mask': "####.##.## ##:##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + '\d{4}'+ '\.' + months + '\.' + days + ' ' + milhours + ':' + minutes + ':' + seconds,
        'description': "YYYY.MM.DD 24Hr Time"
    },
    "EUDATE24HRTIMEDDMMYYYY/HHMMSS": {
        'mask': "##/##/#### ##:##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '/' + months + '/' + '\d{4} ' + milhours + ':' + minutes + ':' + seconds,
        'description': "DD/MM/YYYY 24Hr Time"
    },
    "EUDATE24HRTIMEDDMMYYYY.HHMMSS": {
        'mask': "##.##.#### ##:##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '\.' + months + '\.' + '\d{4} ' + milhours + ':' + minutes + ':' + seconds,
        'description': "DD.MM.YYYY 24Hr Time"
    },
    "EUDATE24HRTIMEYYYYMMDD/HHMM": {
        'mask': "####/##/## ##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + '\d{4}'+ '/' + months + '/' + days + ' ' + milhours + ':' + minutes,
        'description': "YYYY/MM/DD 24Hr Time\n(w/o seconds)"
    },
    "EUDATE24HRTIMEYYYYMMDD.HHMM": {
        'mask': "####.##.## ##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + '\d{4}'+ '\.' + months + '\.' + days + ' ' + milhours + ':' + minutes,
        'description': "YYYY.MM.DD 24Hr Time\n(w/o seconds)"
    },
    "EUDATE24HRTIMEDDMMYYYY/HHMM": {
        'mask': "##/##/#### ##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '/' + months + '/' + '\d{4} ' + milhours + ':' + minutes,
        'description': "DD/MM/YYYY 24Hr Time\n(w/o seconds)"
    },
    "EUDATE24HRTIMEDDMMYYYY.HHMM": {
        'mask': "##.##.#### ##:##",
        'formatcodes': 'DF',
        'validRegex': '^' + days + '\.' + months + '\.' + '\d{4} ' + milhours + ':' + minutes,
        'description': "DD.MM.YYYY 24Hr Time\n(w/o seconds)"
    },

    ## ----- Times of day -----
    "TIMEHHMMSS": {
        'mask': "##:##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'TF!',
        'validRegex': '^' + hours + ':' + minutes + ':' + seconds + ' (A|P)M',
        'description': "HH:MM:SS (A|P)M\n(see TimeCtrl)"
    },
    "TIMEHHMM": {
        'mask': "##:## AM",
        'excludeChars': am_pm_exclude,
        'formatcodes': 'TF!',
        'validRegex': '^' + hours + ':' + minutes + ' (A|P)M',
        'description': "HH:MM (A|P)M\n(see TimeCtrl)"
    },
    "24HRTIMEHHMMSS": {
        'mask': "##:##:##",
        'formatcodes': 'TF',
        'validRegex': '^' + milhours + ':' + minutes + ':' + seconds,
        'description': "24Hr HH:MM:SS\n(see TimeCtrl)"
    },
    "24HRTIMEHHMM": {
        'mask': "##:##",
        'formatcodes': 'TF',
        'validRegex': '^' + milhours + ':' + minutes,
        'description': "24Hr HH:MM\n(see TimeCtrl)"
    },

    ## ----- Miscellaneous -----
    "USSOCIALSEC": {
        'mask': "###-##-####",
        'formatcodes': 'F',
        'validRegex': "\d{3}-\d{2}-\d{4}",
        'description': "Social Sec#"
    },
    "CREDITCARD": {
        'mask': "####-####-####-####",
        'formatcodes': 'F',
        'validRegex': "\d{4}-\d{4}-\d{4}-\d{4}",
        'description': "Credit Card"
    },
    "EXPDATEMMYY": {
        'mask': "##/##",
        'formatcodes': "F",
        'validRegex': "^" + months + "/\d\d",
        'description': "Expiration MM/YY"
    },
    "USZIP": {
        'mask': "#####",
        'formatcodes': 'F',
        'validRegex': "^\d{5}",
        'description': "US 5-digit zip code"
    },
    "USZIPPLUS4": {
        'mask': "#####-####",
        'formatcodes': 'F',
        'validRegex': "\d{5}-(\s{4}|\d{4})",
        'description': "US zip+4 code"
    },
    "PERCENT": {
        'mask': "0.##",
        'formatcodes': 'F',
        'validRegex': "^0\.\d\d",
        'description': "Percentage"
    },
    "AGE": {
        'mask': "###",
        'formatcodes': "F",
        'validRegex': "^[1-9]{1} |[1-9][0-9] |1[012][0-9]",
        'description': "Age"
    },
    "EMAIL": {
        'mask': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
        'excludeChars': " \\/*&%$#!+='\"",
        'formatcodes': "F>",
        'validRegex': "^\w+([\-\.]\w+)*@((([a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*\.)+)[a-zA-Z]{2,4}|\[(\d|\d\d|(1\d\d|2[0-4]\d|25[0-5]))(\.(\d|\d\d|(1\d\d|2[0-4]\d|25[0-5]))){3}\]) *$",
        'description': "Email address"
    },
    "IPADDR": {
        'mask': "###.###.###.###",
        'formatcodes': 'F_Sr',
        'validRegex': "( \d| \d\d|(1\d\d|2[0-4]\d|25[0-5]))(\.( \d| \d\d|(1\d\d|2[0-4]\d|25[0-5]))){3}",
        'description': "IP Address\n(see IpAddrCtrl)"
    }
}
# Build a demo-friendly, alphabetized list of (tag, description) pairs
# for the autoformats defined above:
autoformats = sorted(
    (tag, parms['description']) for tag, parms in masktags.items()
)
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
class Field:
"""
This class manages the individual fields in a masked edit control.
Each field has a zero-based index, indicating its position in the
control, an extent, an associated mask, and a plethora of optional
parameters. Fields can be instantiated and then associated with
parent masked controls, in order to provide field-specific configuration.
Alternatively, fields will be implicitly created by the parent control
if not provided at construction, at which point the fields can then be
manipulated by the control's .SetFieldParameters() method.
"""
## Class-level map of every legal Field parameter to its default value.
## __init__ copies each default onto the instance as a '_<name>' attribute,
## and _SetParameters only accepts keys listed here.
valid_params = {
    'index': None,                    ## which field of mask; set by parent control.
    'mask': "",                       ## mask chars for this field
    'extent': (),                     ## (edit start, edit_end) of field; set by parent control.
    'formatcodes': "",                ## codes indicating formatting options for the control
    'fillChar': ' ',                  ## used as initial value for each mask position if initial value is not given
    'groupChar': ',',                 ## used with numeric fields; char that separates each group of three digits
    'decimalChar': '.',               ## used with numeric fields; indicates what char separates integer from fraction
    'shiftDecimalChar': '>',          ## used with numeric fields, indicates what is above the decimal point char on keyboard
    'useParensForNegatives': False,   ## used with numeric fields, indicates that () should be used vs. - to show negative numbers.
    'defaultValue': "",               ## use if you want different positional defaults vs. all the same fillChar
    'excludeChars': "",               ## optional string of chars to exclude even if main mask type does
    'includeChars': "",               ## optional string of chars to allow even if main mask type doesn't
    'validRegex': "",                 ## optional regular expression to use to validate the control
    'validRange': (),                 ## Optional hi-low range for numerics
    'choices': [],                    ## Optional list for character expressions
    'choiceRequired': False,          ## If choices supplied this specifies if valid value must be in the list
    'compareNoCase': False,           ## Optional flag to indicate whether or not to use case-insensitive list search
    'autoSelect': False,              ## Set to True to try auto-completion on each keystroke:
    'validFunc': None,                ## Optional function for defining additional, possibly dynamic validation constraints on control
    'validRequired': False,           ## Set to True to disallow input that results in an invalid value
    'emptyInvalid': False,            ## Set to True to make EMPTY = INVALID
    'description': "",                ## primarily for autoformats, but could be useful elsewhere
    'raiseOnInvalidPaste': False,     ## if True, paste into field will cause ValueError
    'stopFieldChangeIfInvalid': False,## if True, disallow field navigation out of invalid field
}
# This list contains all parameters that when set at the control level should
# propagate down to each field:
propagating_params = ('fillChar', 'groupChar', 'decimalChar','useParensForNegatives',
                      'compareNoCase', 'emptyInvalid', 'validRequired', 'raiseOnInvalidPaste',
                      'stopFieldChangeIfInvalid')
def __init__(self, **kwargs):
    """
    Set up the parameters for this field instance.

    A field_index of -1 is used to indicate "the entire control."
    Raises AttributeError (with its .attribute set to the offending key)
    when an unrecognized parameter name is supplied.
    """
    # Reject any parameter name not declared in Field.valid_params:
    for name in kwargs.keys():
        if name not in Field.valid_params:
            err = AttributeError('invalid parameter "%s"' % (name))
            err.attribute = name
            raise err
    # Seed each known parameter with a copy of its default on this
    # instance, and fill out kwargs so the complete configuration is
    # pushed through _SetParameters/_ValidateParameters below:
    for name, default in Field.valid_params.items():
        setattr(self, '_' + name, copy.copy(default))
        if name not in kwargs:
            kwargs[name] = copy.copy(default)
    self._autoCompleteIndex = -1
    self._SetParameters(**kwargs)
    self._ValidateParameters(**kwargs)
def _SetParameters(self, **kwargs):
    """
    This function can be used to set individual or multiple parameters for
    a masked edit field parameter after construction.

    Only keys listed in Field.valid_params are accepted; any other key
    raises AttributeError with .attribute set to the offending name.
    This routine only assigns values and refreshes derived bookkeeping
    (format-code flags, regex-mask flag, previous fill char, integer-mask
    detection); semantic validation is done in _ValidateParameters().
    """
    ## dbg(suspend=1)
    ## dbg('maskededit.Field::_SetParameters', indent=1)
    # Validate keyword arguments:
    for key in kwargs.keys():
        if key not in Field.valid_params.keys():
            ## dbg(indent=0, suspend=0)
            ae = AttributeError('invalid keyword argument "%s"' % key)
            ae.attribute = key
            raise ae
    ## if self._index is not None: dbg('field index:', self._index)
    ## dbg('parameters:', indent=1)
    for key, value in kwargs.items():
        ## dbg('%s:' % key, value)
        pass
    ## dbg(indent=0)
    old_fillChar = self._fillChar # store so we can change choice lists accordingly if it changes
    # First, Assign all parameters specified:
    for key in Field.valid_params.keys():
        if kwargs.has_key(key):
            setattr(self, '_' + key, kwargs[key] )
    if kwargs.has_key('formatcodes'): # (set/changed)
        # Decode the format-code string into individual boolean flags:
        self._forceupper = '!' in self._formatcodes
        self._forcelower = '^' in self._formatcodes
        self._groupdigits = ',' in self._formatcodes
        self._okSpaces = '_' in self._formatcodes
        self._padZero = '0' in self._formatcodes
        self._autofit = 'F' in self._formatcodes
        self._insertRight = 'r' in self._formatcodes
        self._allowInsert = '>' in self._formatcodes
        # 'r' (insert right) implies right alignment, like 'R':
        self._alignRight = 'R' in self._formatcodes or 'r' in self._formatcodes
        self._moveOnFieldFull = not '<' in self._formatcodes
        self._selectOnFieldEntry = 'S' in self._formatcodes
    if kwargs.has_key('groupChar'):
        self._groupChar = kwargs['groupChar']
    if kwargs.has_key('decimalChar'):
        self._decimalChar = kwargs['decimalChar']
    if kwargs.has_key('shiftDecimalChar'):
        self._shiftDecimalChar = kwargs['shiftDecimalChar']
    if kwargs.has_key('formatcodes') or kwargs.has_key('validRegex'):
        # regex validation is only active when the 'V' format code is set:
        self._regexMask = 'V' in self._formatcodes and self._validRegex
    if kwargs.has_key('fillChar'):
        # remember the prior fill char so choice lists can be re-padded
        # by _ValidateParameters:
        self._old_fillChar = old_fillChar
        #### dbg("self._old_fillChar: '%s'" % self._old_fillChar)
    if kwargs.has_key('mask') or kwargs.has_key('validRegex'): # (set/changed)
        self._isInt = _isInteger(self._mask)
        ## dbg('isInt?', self._isInt, 'self._mask:"%s"' % self._mask)
    ## dbg(indent=0, suspend=0)
def _ValidateParameters(self, **kwargs):
    """
    This function can be used to validate individual or multiple parameters for
    a masked edit field parameter after construction.

    Performs the semantic and inter-dependency checks deferred by
    _SetParameters(): numeric separator conflicts, choice-list
    normalization/padding/validation, regex compilation, and range
    checking.  Raises AttributeError, TypeError or ValueError as
    appropriate.
    """
    ## dbg(suspend=1)
    ## dbg('maskededit.Field::_ValidateParameters', indent=1)
    ## if self._index is not None: dbg('field index:', self._index)
    #### dbg('parameters:', indent=1)
    ## for key, value in kwargs.items():
    #### dbg('%s:' % key, value)
    #### dbg(indent=0)
    #### dbg("self._old_fillChar: '%s'" % self._old_fillChar)
    # Verify proper numeric format params:
    if self._groupdigits and self._groupChar == self._decimalChar:
        ## dbg(indent=0, suspend=0)
        ae = AttributeError("groupChar '%s' cannot be the same as decimalChar '%s'" % (self._groupChar, self._decimalChar))
        ae.attribute = self._groupChar
        raise ae
    # Now go do validation, semantic and inter-dependency parameter processing:
    if kwargs.has_key('choices') or kwargs.has_key('compareNoCase') or kwargs.has_key('choiceRequired'): # (set/changed)
        # keep a whitespace-stripped (and possibly lowercased) shadow list
        # used for membership tests in IsValid():
        self._compareChoices = [choice.strip() for choice in self._choices]
        if self._compareNoCase and self._choices:
            self._compareChoices = [item.lower() for item in self._compareChoices]
        if kwargs.has_key('choices'):
            self._autoCompleteIndex = -1
    if kwargs.has_key('validRegex'): # (set/changed)
        if self._validRegex:
            try:
                if self._compareNoCase:
                    self._filter = re.compile(self._validRegex, re.IGNORECASE)
                else:
                    self._filter = re.compile(self._validRegex)
            except:
                ## dbg(indent=0, suspend=0)
                raise TypeError('%s: validRegex "%s" not a legal regular expression' % (str(self._index), self._validRegex))
        else:
            self._filter = None
    if kwargs.has_key('validRange'): # (set/changed)
        self._hasRange = False
        self._rangeHigh = 0
        self._rangeLow = 0
        if self._validRange:
            if type(self._validRange) != types.TupleType or len( self._validRange )!= 2 or self._validRange[0] > self._validRange[1]:
                ## dbg(indent=0, suspend=0)
                raise TypeError('%s: validRange %s parameter must be tuple of form (a,b) where a <= b'
                                % (str(self._index), repr(self._validRange)) )
            self._hasRange = True
            self._rangeLow = self._validRange[0]
            self._rangeHigh = self._validRange[1]
    if kwargs.has_key('choices') or (len(self._choices) and len(self._choices[0]) != len(self._mask)): # (set/changed)
        self._hasList = False
        if self._choices and type(self._choices) not in (types.TupleType, types.ListType):
            ## dbg(indent=0, suspend=0)
            raise TypeError('%s: choices must be a sequence of strings' % str(self._index))
        elif len( self._choices) > 0:
            for choice in self._choices:
                if type(choice) not in (types.StringType, types.UnicodeType):
                    ## dbg(indent=0, suspend=0)
                    raise TypeError('%s: choices must be a sequence of strings' % str(self._index))
            length = len(self._mask)
            ## dbg('len(%s)' % self._mask, length, 'len(self._choices):', len(self._choices), 'length:', length, 'self._alignRight?', self._alignRight)
            if len(self._choices) and length:
                if len(self._choices[0]) > length:
                    # changed mask without respecifying choices; readjust the width as appropriate:
                    self._choices = [choice.strip() for choice in self._choices]
                if self._alignRight:
                    self._choices = [choice.rjust( length ) for choice in self._choices]
                else:
                    self._choices = [choice.ljust( length ) for choice in self._choices]
                ## dbg('aligned choices:', self._choices)
            if hasattr(self, '_template'):
                # Verify each choice specified is valid:
                for choice in self._choices:
                    if self.IsEmpty(choice) and not self._validRequired:
                        # allow empty values even if invalid, (just colored differently)
                        continue
                    if not self.IsValid(choice):
                        ## dbg(indent=0, suspend=0)
                        ve = ValueError('%s: "%s" is not a valid value for the control as specified.' % (str(self._index), choice))
                        ve.value = choice
                        raise ve
            self._hasList = True
    #### dbg("kwargs.has_key('fillChar')?", kwargs.has_key('fillChar'), "len(self._choices) > 0?", len(self._choices) > 0)
    #### dbg("self._old_fillChar:'%s'" % self._old_fillChar, "self._fillChar: '%s'" % self._fillChar)
    if kwargs.has_key('fillChar') and len(self._choices) > 0:
        # re-pad choice lists when the fill character changes:
        if kwargs['fillChar'] != ' ':
            self._choices = [choice.replace(' ', self._fillChar) for choice in self._choices]
        else:
            self._choices = [choice.replace(self._old_fillChar, self._fillChar) for choice in self._choices]
        ## dbg('updated choices:', self._choices)
    if kwargs.has_key('autoSelect') and kwargs['autoSelect']:
        if not self._hasList:
            ## dbg('no list to auto complete; ignoring "autoSelect=True"')
            self._autoSelect = False
    # reset field validity assumption:
    self._valid = True
    ## dbg(indent=0, suspend=0)
def _GetParameter(self, paramname):
    """
    Routine for retrieving the value of any given parameter

    Raises TypeError for an unrecognized parameter name.

    Bug fix: previously the TypeError was constructed but never raised
    (and its message referenced an undefined name ``key``), so invalid
    parameter names silently returned None.
    """
    if paramname in Field.valid_params:
        return getattr(self, '_' + paramname)
    else:
        raise TypeError('Field._GetParameter: invalid parameter "%s"' % paramname)
def IsEmpty(self, slice):
    """
    Return True if *slice* represents an "empty" value for this field.

    A slice is empty when it equals the field template and either no
    default value is configured, or every character of the template is a
    space or the field's fill character.  Raises AttributeError('_template')
    if the field's template has not been computed yet.
    """
    ## dbg('Field::IsEmpty("%s")' % slice, indent=1)
    if not hasattr(self, '_template'):
        ## dbg(indent=0)
        raise AttributeError('_template')
    ## dbg('self._template: "%s"' % self._template)
    ## dbg('self._defaultValue: "%s"' % str(self._defaultValue))
    if slice != self._template:
        # Anything that doesn't even match the template cannot be empty.
        ## dbg("IsEmpty? 0 (slice doesn't match template)", indent=0)
        return False
    if not self._defaultValue:
        # No default value: matching the template is enough.
        ## dbg(indent=0)
        return True
    # A default value exists, so the template itself may contain visible
    # characters; only spaces / fill chars count as empty.
    for ch in self._template:
        if ch not in (' ', self._fillChar):
            ## dbg("IsEmpty? 0 (do all mask chars == fillChar?)", indent=0)
            return False
    ## dbg("IsEmpty? 1 (do all mask chars == fillChar?)", indent=0)
    return True
def IsValid(self, slice):
    """
    Indicates whether the specified slice is considered a valid value for the
    field.

    Checks are applied in priority order: emptiness (result controlled by
    emptyInvalid), required membership in the choice list, numeric range,
    then validRegex; any supplied validFunc is applied last as an extra
    constraint on an otherwise-valid value.
    """
    ## dbg(suspend=1)
    ## dbg('Field[%s]::IsValid("%s")' % (str(self._index), slice), indent=1)
    valid = True # assume true to start
    if self.IsEmpty(slice):
        ## dbg(indent=0, suspend=0)
        if self._emptyInvalid:
            return False
        else:
            return True
    elif self._hasList and self._choiceRequired:
        ## dbg("(member of list required)")
        # do case-insensitive match on list; strip surrounding whitespace from slice (already done for choices):
        if self._fillChar != ' ':
            slice = slice.replace(self._fillChar, ' ')
            ## dbg('updated slice:"%s"' % slice)
        compareStr = slice.strip()
        if self._compareNoCase:
            compareStr = compareStr.lower()
        valid = compareStr in self._compareChoices
    elif self._hasRange and not self.IsEmpty(slice):
        ## dbg('validating against range')
        try:
            # allow float as well as int ranges (int comparisons for free.)
            valid = self._rangeLow <= float(slice) <= self._rangeHigh
        except:
            valid = False
    elif self._validRegex and self._filter:
        ## dbg('validating against regex')
        valid = (re.match( self._filter, slice) is not None)
    if valid and self._validFunc:
        ## dbg('validating against supplied function')
        valid = self._validFunc(slice)
    ## dbg('valid?', valid, indent=0, suspend=0)
    return valid
def _AdjustField(self, slice):
    """ 'Fixes' an integer field. Right or left-justifies, as required.

    For integer masks, this also strips fill/group/sign characters,
    re-applies digit grouping and zero padding when those format codes
    are enabled, and restores a leading '-' (or surrounding parentheses)
    for negative values.  Non-integer slices are simply re-justified and
    re-padded with the fill character.
    """
    ## dbg('Field::_AdjustField("%s")' % slice, indent=1)
    length = len(self._mask)
    #### dbg('length(self._mask):', length)
    #### dbg('self._useParensForNegatives?', self._useParensForNegatives)
    if self._isInt:
        if self._useParensForNegatives:
            signpos = slice.find('(')
            right_signpos = slice.find(')')
            intStr = slice.replace('(', '').replace(')', '') # drop sign, if any
        else:
            signpos = slice.find('-')
            intStr = slice.replace( '-', '' ) # drop sign, if any
            right_signpos = -1
        intStr = intStr.replace(' ', '') # drop extra spaces
        intStr = string.replace(intStr,self._fillChar,"") # drop extra fillchars
        intStr = string.replace(intStr,"-","") # drop sign, if any
        intStr = string.replace(intStr, self._groupChar, "") # lose commas/dots
        #### dbg('intStr:"%s"' % intStr)
        start, end = self._extent
        field_len = end - start
        # normalize the digit string (drops leading zeros) unless the
        # field is zero-padded or already exactly the field width:
        if not self._padZero and len(intStr) != field_len and intStr.strip():
            intStr = str(long(intStr))
        #### dbg('raw int str: "%s"' % intStr)
        #### dbg('self._groupdigits:', self._groupdigits, 'self._formatcodes:', self._formatcodes)
        if self._groupdigits:
            # rebuild the digit string right-to-left, inserting the group
            # character every 3 digits:
            new = ''
            cnt = 1
            for i in range(len(intStr)-1, -1, -1):
                new = intStr[i] + new
                if (cnt) % 3 == 0:
                    new = self._groupChar + new
                cnt += 1
            if new and new[0] == self._groupChar:
                new = new[1:]
            if len(new) <= length:
                # expanded string will still fit and leave room for sign:
                intStr = new
            # else... leave it without the commas...
        ## dbg('padzero?', self._padZero)
        ## dbg('len(intStr):', len(intStr), 'field length:', length)
        if self._padZero and len(intStr) < length:
            intStr = '0' * (length - len(intStr)) + intStr
            if signpos != -1: # we had a sign before; restore it
                if self._useParensForNegatives:
                    intStr = '(' + intStr[1:]
                    if right_signpos != -1:
                        intStr += ')'
                else:
                    intStr = '-' + intStr[1:]
        elif signpos != -1 and slice[0:signpos].strip() == '': # - was before digits
            if self._useParensForNegatives:
                intStr = '(' + intStr
                if right_signpos != -1:
                    intStr += ')'
            else:
                intStr = '-' + intStr
        elif right_signpos != -1:
            # must have had ')' but '(' was before field; re-add ')'
            intStr += ')'
        slice = intStr
    slice = slice.strip() # drop extra spaces
    if self._alignRight: ## Only if right-alignment is enabled
        slice = slice.rjust( length )
    else:
        slice = slice.ljust( length )
    if self._fillChar != ' ':
        slice = slice.replace(' ', self._fillChar)
    ## dbg('adjusted slice: "%s"' % slice, indent=0)
    return slice
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
class MaskedEditMixin:
"""
This class allows us to abstract the masked edit functionality that could
be associated with any text entry control. (eg. wx.TextCtrl, wx.ComboBox, etc.)
It forms the basis for all of the lib.masked controls.
"""
valid_ctrl_params = {
    'mask': 'XXXXXXXXXXXXX', ## mask string for formatting this control
    'autoformat': "", ## optional auto-format code to set format from masktags dictionary
    'fields': {}, ## optional list/dictionary of maskededit.Field class instances, indexed by position in mask
    'datestyle': 'MDY', ## optional date style for date-type values. Can trigger autocomplete year
    'autoCompleteKeycodes': [], ## Optional list of additional keycodes which will invoke field-auto-complete
    'useFixedWidthFont': True, ## Use fixed-width font instead of default for base control
    'defaultEncoding': 'latin1', ## optional argument to indicate unicode codec to use (unicode ctrls only)
    'retainFieldValidation': False, ## Set this to true if setting control-level parameters independently,
    ## from field validation constraints
    'emptyBackgroundColour': "White", ## background colour used while the control is empty
    'validBackgroundColour': "White", ## background colour used while the value is valid
    'invalidBackgroundColour': "Yellow", ## background colour used while the value is invalid
    'foregroundColour': "Black", ## normal text colour
    'signedForegroundColour': "Red", ## text colour for signed values (presumably negatives -- confirm in _applyFormatting)
    'demo': False} ## demo flag; behaviour defined elsewhere in this module
def __init__(self, name = 'MaskedEdit', **kwargs):
    """
    This is the "constructor" for setting up the mixin variable parameters for the composite class.

    name    - name given to this control instance; used in error messages.
    kwargs  - any mix of control-level (MaskedEditMixin.valid_ctrl_params)
              and field-level (Field.valid_params) parameters; any other
              key raises TypeError.  US 'Color' spellings are accepted.
    """
    self.name = name
    # set up flag for doing optional things to base control if possible
    if not hasattr(self, 'controlInitialized'):
        self.controlInitialized = False
    # Set internal state var for keeping track of whether or not a character
    # action results in a modification of the control, since .SetValue()
    # doesn't modify the base control's internal state:
    self.modified = False
    self._previous_mask = None
    # Validate legitimate set of parameters:
    for key in kwargs.keys():
        if key.replace('Color', 'Colour') not in MaskedEditMixin.valid_ctrl_params.keys() + Field.valid_params.keys():
            raise TypeError('%s: invalid parameter "%s"' % (name, key))
    ## Set up dictionary that can be used by subclasses to override or add to default
    ## behavior for individual characters. Derived subclasses needing to change
    ## default behavior for keys can either redefine the default functions for the
    ## common keys or add functions for specific keys to this list. Each function
    ## added should take the key event as argument, and return False if the key
    ## requires no further processing.
    ##
    ## Initially populated with navigation and function control keys:
    self._keyhandlers = {
        # default navigation keys and handlers:
        wx.WXK_BACK: self._OnErase,
        wx.WXK_LEFT: self._OnArrow,
        wx.WXK_NUMPAD_LEFT: self._OnArrow,
        wx.WXK_RIGHT: self._OnArrow,
        wx.WXK_NUMPAD_RIGHT: self._OnArrow,
        wx.WXK_UP: self._OnAutoCompleteField,
        wx.WXK_NUMPAD_UP: self._OnAutoCompleteField,
        wx.WXK_DOWN: self._OnAutoCompleteField,
        wx.WXK_NUMPAD_DOWN: self._OnAutoCompleteField,
        wx.WXK_TAB: self._OnChangeField,
        wx.WXK_HOME: self._OnHome,
        wx.WXK_NUMPAD_HOME: self._OnHome,
        wx.WXK_END: self._OnEnd,
        wx.WXK_NUMPAD_END: self._OnEnd,
        wx.WXK_RETURN: self._OnReturn,
        wx.WXK_NUMPAD_ENTER: self._OnReturn,
        wx.WXK_PRIOR: self._OnAutoCompleteField,
        wx.WXK_NUMPAD_PRIOR: self._OnAutoCompleteField,
        wx.WXK_NEXT: self._OnAutoCompleteField,
        wx.WXK_NUMPAD_NEXT: self._OnAutoCompleteField,
        # default function control keys and handlers:
        wx.WXK_DELETE: self._OnDelete,
        wx.WXK_NUMPAD_DELETE: self._OnDelete,
        wx.WXK_INSERT: self._OnInsert,
        wx.WXK_NUMPAD_INSERT: self._OnInsert,
        WXK_CTRL_A: self._OnCtrl_A,
        WXK_CTRL_C: self._OnCtrl_C,
        WXK_CTRL_S: self._OnCtrl_S,
        WXK_CTRL_V: self._OnCtrl_V,
        WXK_CTRL_X: self._OnCtrl_X,
        WXK_CTRL_Z: self._OnCtrl_Z,
        }
    ## bind standard navigational and control keycodes to this instance,
    ## so that they can be augmented and/or changed in derived classes:
    self._nav = list(nav)
    self._control = list(control)
    ## Dynamically evaluate and store string constants for mask chars
    ## so that locale settings can be made after this module is imported
    ## and the controls created after that is done can allow the
    ## appropriate characters:
    self.maskchardict = {
        '#': string.digits,
        'A': string.uppercase,
        'a': string.lowercase,
        'X': string.letters + string.punctuation + string.digits,
        'C': string.letters,
        'N': string.letters + string.digits,
        '&': string.punctuation,
        '*': ansichars # to give it a value, but now allows any non-wxcontrol character
    }
    ## self._ignoreChange is used by MaskedComboBox, because
    ## of the hack necessary to determine the selection; it causes
    ## EVT_TEXT messages from the combobox to be ignored if set.
    self._ignoreChange = False
    # These are used to keep track of previous value, for undo functionality:
    self._curValue = None
    self._prevValue = None
    self._valid = True
    # Set defaults for each parameter for this instance, and fully
    # populate initial parameter list for configuration:
    for key, value in MaskedEditMixin.valid_ctrl_params.items():
        setattr(self, '_' + key, copy.copy(value))
        if not kwargs.has_key(key):
            #### dbg('%s: "%s"' % (key, repr(value)))
            kwargs[key] = copy.copy(value)
    # Create a "field" that holds global parameters for control constraints
    # NOTE(review): this indexes self._fields before any visible assignment
    # of that attribute in this method -- presumably initialized elsewhere
    # in the class or by a subclass; confirm construction order.
    self._ctrl_constraints = self._fields[-1] = Field(index=-1)
    self.SetCtrlParameters(**kwargs)
def SetCtrlParameters(self, **kwargs):
    """
    This public function can be used to set individual or multiple masked edit
    parameters after construction.  (See maskededit module overview for the list
    of valid parameters.)

    Keywords are partitioned into control-level parameters
    (MaskedEditMixin.valid_ctrl_params) and field-level constraint
    parameters (Field.valid_params); US 'Color' spellings are normalized
    to 'Colour'.  Unrecognized names raise AttributeError (with
    .attribute set).  Setting a mask or autoformat reconfigures the whole
    control and resets its fields.

    Bug fix applied: the bare ``except Exception, e`` (which bound an
    unused name and is not Python-3 syntax) was replaced by
    ``except Exception``; behavior is unchanged.
    """
    ## dbg(suspend=1)
    ## dbg('MaskedEditMixin::SetCtrlParameters', indent=1)
    #### dbg('kwargs:', indent=1)
    ## for key, value in kwargs.items():
    #### dbg(key, '=', value)
    #### dbg(indent=0)
    # Validate keyword arguments:
    constraint_kwargs = {}
    ctrl_kwargs = {}
    for key, value in kwargs.items():
        key = key.replace('Color', 'Colour') # for b-c, and standard wxPython spelling
        if key not in MaskedEditMixin.valid_ctrl_params.keys() + Field.valid_params.keys():
            ## dbg(indent=0, suspend=0)
            ae = AttributeError('Invalid keyword argument "%s" for control "%s"' % (key, self.name))
            ae.attribute = key
            raise ae
        elif key in Field.valid_params.keys():
            constraint_kwargs[key] = value
        else:
            ctrl_kwargs[key] = value
    mask = None
    reset_args = {}
    if ctrl_kwargs.has_key('autoformat'):
        autoformat = ctrl_kwargs['autoformat']
    else:
        autoformat = None
    # handle "parochial name" backward compatibility:
    if autoformat and autoformat.find('MILTIME') != -1 and autoformat not in masktags.keys():
        autoformat = autoformat.replace('MILTIME', '24HRTIME')
    if autoformat != self._autoformat and autoformat in masktags.keys():
        ## dbg('autoformat:', autoformat)
        self._autoformat = autoformat
        mask = masktags[self._autoformat]['mask']
        # gather rest of any autoformat parameters:
        for param, value in masktags[self._autoformat].items():
            if param == 'mask': continue # (must be present; already accounted for)
            constraint_kwargs[param] = value
    elif autoformat and not autoformat in masktags.keys():
        ae = AttributeError('invalid value for autoformat parameter: %s' % repr(autoformat))
        ae.attribute = autoformat
        raise ae
    else:
        ## dbg('autoformat not selected')
        if kwargs.has_key('mask'):
            mask = kwargs['mask']
            ## dbg('mask:', mask)
    ## Assign style flags
    if mask is None:
        ## dbg('preserving previous mask')
        mask = self._previous_mask # preserve previous mask
    else:
        ## dbg('mask (re)set')
        reset_args['reset_mask'] = mask
        constraint_kwargs['mask'] = mask
        # wipe out previous fields; preserve new control-level constraints
        self._fields = {-1: self._ctrl_constraints}
    if ctrl_kwargs.has_key('fields'):
        # do field parameter type validation, and conversion to internal dictionary
        # as appropriate:
        fields = ctrl_kwargs['fields']
        if type(fields) in (types.ListType, types.TupleType):
            for i in range(len(fields)):
                field = fields[i]
                if not isinstance(field, Field):
                    ## dbg(indent=0, suspend=0)
                    raise TypeError('invalid type for field parameter: %s' % repr(field))
                self._fields[i] = field
        elif type(fields) == types.DictionaryType:
            for index, field in fields.items():
                if not isinstance(field, Field):
                    ## dbg(indent=0, suspend=0)
                    raise TypeError('invalid type for field parameter: %s' % repr(field))
                self._fields[index] = field
        else:
            ## dbg(indent=0, suspend=0)
            raise TypeError('fields parameter must be a list or dictionary; not %s' % repr(fields))
    # Assign constraint parameters for entire control:
    #### dbg('control constraints:', indent=1)
    ## for key, value in constraint_kwargs.items():
    #### dbg('%s:' % key, value)
    #### dbg(indent=0)
    # determine if changing parameters that should affect the entire control:
    for key in MaskedEditMixin.valid_ctrl_params.keys():
        if key in ( 'mask', 'fields' ): continue # (processed separately)
        if ctrl_kwargs.has_key(key):
            setattr(self, '_' + key, ctrl_kwargs[key])
    # Validate color parameters, converting strings to named colors and validating
    # result if appropriate:
    for key in ('emptyBackgroundColour', 'invalidBackgroundColour', 'validBackgroundColour',
                'foregroundColour', 'signedForegroundColour'):
        if ctrl_kwargs.has_key(key):
            if type(ctrl_kwargs[key]) in (types.StringType, types.UnicodeType):
                c = wx.NamedColour(ctrl_kwargs[key])
                if c.Get() == (-1, -1, -1):
                    raise TypeError('%s not a legal color specification for %s' % (repr(ctrl_kwargs[key]), key))
                else:
                    # replace attribute with wxColour object:
                    setattr(self, '_' + key, c)
                    # attach a python dynamic attribute to wxColour for debug printouts
                    c._name = ctrl_kwargs[key]
            elif type(ctrl_kwargs[key]) != type(wx.BLACK):
                raise TypeError('%s not a legal color specification for %s' % (repr(ctrl_kwargs[key]), key))
    ## dbg('self._retainFieldValidation:', self._retainFieldValidation)
    if not self._retainFieldValidation:
        # Build dictionary of any changing parameters which should be propagated to the
        # component fields:
        for arg in Field.propagating_params:
            #### dbg('kwargs.has_key(%s)?' % arg, kwargs.has_key(arg))
            #### dbg('getattr(self._ctrl_constraints, _%s)?' % arg, getattr(self._ctrl_constraints, '_'+arg))
            reset_args[arg] = kwargs.has_key(arg) and kwargs[arg] != getattr(self._ctrl_constraints, '_'+arg)
            #### dbg('reset_args[%s]?' % arg, reset_args[arg])
    # Set the control-level constraints:
    self._ctrl_constraints._SetParameters(**constraint_kwargs)
    # This routine does the bulk of the interdependent parameter processing, determining
    # the field extents of the mask if changed, resetting parameters as appropriate,
    # determining the overall template value for the control, etc.
    self._configure(mask, **reset_args)
    # now that we've propagated the field constraints and mask portions to the
    # various fields, validate the constraints
    self._ctrl_constraints._ValidateParameters(**constraint_kwargs)
    # Validate that all choices for given fields are at least of the
    # necessary length, and that they all would be valid pastes if pasted
    # into their respective fields:
    #### dbg('validating choices')
    self._validateChoices()
    self._autofit = self._ctrl_constraints._autofit
    self._isNeg = False
    self._isDate = 'D' in self._ctrl_constraints._formatcodes and _isDateType(mask)
    self._isTime = 'T' in self._ctrl_constraints._formatcodes and _isTimeType(mask)
    if self._isDate:
        # Set _dateExtent, used in date validation to locate date in string;
        # always set as though year will be 4 digits, even if mask only has
        # 2 digits, so we can always properly process the intended year for
        # date validation (leap years, etc.)
        if self._mask.find('CCC') != -1: self._dateExtent = 11
        else: self._dateExtent = 10
        self._4digityear = len(self._mask) > 8 and self._mask[9] == '#'
    if self._isDate and self._autoformat:
        # Auto-decide datestyle:
        if self._autoformat.find('MDDY') != -1: self._datestyle = 'MDY'
        elif self._autoformat.find('YMMD') != -1: self._datestyle = 'YMD'
        elif self._autoformat.find('YMMMD') != -1: self._datestyle = 'YMD'
        elif self._autoformat.find('DMMY') != -1: self._datestyle = 'DMY'
        elif self._autoformat.find('DMMMY') != -1: self._datestyle = 'DMY'
    # Give derived controls a chance to react to parameter changes before
    # potentially changing current value of the control.
    self._OnCtrlParametersChanged()
    if self.controlInitialized:
        # Then the base control is available for configuration;
        # take action on base control based on new settings, as appropriate.
        if kwargs.has_key('useFixedWidthFont'):
            # Set control font - fixed width by default
            self._setFont()
        if reset_args.has_key('reset_mask'):
            ## dbg('reset mask')
            curvalue = self._GetValue()
            if curvalue.strip():
                try:
                    ## dbg('attempting to _SetInitialValue(%s)' % self._GetValue())
                    self._SetInitialValue(self._GetValue())
                except Exception:   # (was 'except Exception, e': e was unused)
                    ## dbg("current value doesn't work; attempting to reset to template")
                    self._SetInitialValue()
            else:
                ## dbg('attempting to _SetInitialValue() with template')
                self._SetInitialValue()
        elif kwargs.has_key('useParensForNegatives'):
            newvalue = self._getSignedValue()[0]
            if newvalue is not None:
                # Adjust for new mask:
                if len(newvalue) < len(self._mask):
                    newvalue += ' '
                elif len(newvalue) > len(self._mask):
                    if newvalue[-1] in (' ', ')'):
                        newvalue = newvalue[:-1]
                ## dbg('reconfiguring value for parens:"%s"' % newvalue)
                self._SetValue(newvalue)
                if self._prevValue != newvalue:
                    self._prevValue = newvalue # disallow undo of sign type
        if self._autofit:
            ## dbg('calculated size:', self._CalcSize())
            self.SetClientSize(self._CalcSize())
            width = self.GetSize().width
            height = self.GetBestSize().height
            ## dbg('setting client size to:', (width, height))
            self.SetInitialSize((width, height))
        # Set value/type-specific formatting
        self._applyFormatting()
    ## dbg(indent=0, suspend=0)
def SetMaskParameters(self, **kwargs):
    """ old name for the SetCtrlParameters function (DEPRECATED)"""
    # Thin backward-compatibility wrapper; use SetCtrlParameters() instead.
    return self.SetCtrlParameters(**kwargs)
def GetCtrlParameter(self, paramname):
    """
    Routine for retrieving the value of any given parameter

    Accepts both US 'Color' and UK 'Colour' spellings for colour
    parameters.  Control-level parameters are read from this instance;
    field-level parameters are delegated to the control-wide constraints
    field.  Raises TypeError for an unrecognized parameter name.

    Bug fix: previously the TypeError was constructed but never raised,
    so invalid parameter names silently returned None.
    """
    name = paramname.replace('Color', 'Colour')     # normalize US spelling
    if name in MaskedEditMixin.valid_ctrl_params:
        return getattr(self, '_' + name)
    elif paramname in Field.valid_params:
        return self._ctrl_constraints._GetParameter(paramname)
    else:
        raise TypeError('"%s".GetCtrlParameter: invalid parameter "%s"' % (self.name, paramname))
def GetMaskParameter(self, paramname):
    """ old name for the GetCtrlParameters function (DEPRECATED)"""
    # Thin backward-compatibility wrapper; use GetCtrlParameter() instead.
    return self.GetCtrlParameter(paramname)
## This idea worked, but Boa was unable to use this solution...
## def _attachMethod(self, func):
## import new
## setattr(self, func.__name__, new.instancemethod(func, self, self.__class__))
##
##
## def _DefinePropertyFunctions(exposed_params):
## for param in exposed_params:
## propname = param[0].upper() + param[1:]
##
## exec('def Set%s(self, value): self.SetCtrlParameters(%s=value)' % (propname, param))
## exec('def Get%s(self): return self.GetCtrlParameter("%s")''' % (propname, param))
## self._attachMethod(locals()['Set%s' % propname])
## self._attachMethod(locals()['Get%s' % propname])
##
## if param.find('Colour') != -1:
## # add non-british spellings, for backward-compatibility
## propname.replace('Colour', 'Color')
##
## exec('def Set%s(self, value): self.SetCtrlParameters(%s=value)' % (propname, param))
## exec('def Get%s(self): return self.GetCtrlParameter("%s")''' % (propname, param))
## self._attachMethod(locals()['Set%s' % propname])
## self._attachMethod(locals()['Get%s' % propname])
##
def SetFieldParameters(self, field_index, **kwargs):
    """
    Routine provided to modify the parameters of a given field.
    Because changes to fields can affect the overall control,
    direct access to the fields is prevented, and the control
    is always "reconfigured" after setting a field parameter.
    (See maskededit module overview for the list of valid field-level
    parameters.)

    Raises IndexError (with .index set) if field_index is not one of
    this control's fields.
    """
    if field_index not in self._field_indices:
        ie = IndexError('%s is not a valid field for control "%s".' % (str(field_index), self.name))
        ie.index = field_index
        raise ie
    # set parameters as requested:
    self._fields[field_index]._SetParameters(**kwargs)
    # Possibly reprogram control template due to resulting changes, and ensure
    # control-level params are still propagated to fields:
    self._configure(self._previous_mask)
    self._fields[field_index]._ValidateParameters(**kwargs)
    if self.controlInitialized:
        if kwargs.has_key('fillChar') or kwargs.has_key('defaultValue'):
            self._SetInitialValue()
        if self._autofit:
            # this is tricky, because, as Robin explains:
            # "Basically there are two sizes to deal with, that are potentially
            # different. The client size is the inside size and may, depending
            # on platform, exclude the borders and such. The normal size is
            # the outside size that does include the borders. What you are
            # calculating (in _CalcSize) is the client size, but the sizers
            # deal with the full size and so that is the minimum size that
            # we need to set with SetInitialSize. The root of the problem is
            # that in _calcSize the current client size height is returned,
            # instead of a height based on the current font. So I suggest using
            # _calcSize to just get the width, and then use GetBestSize to
            # get the height."
            self.SetClientSize(self._CalcSize())
            width = self.GetSize().width
            height = self.GetBestSize().height
            self.SetInitialSize((width, height))
        # Set value/type-specific formatting
        self._applyFormatting()
def GetFieldParameter(self, field_index, paramname):
    """
    Return the value of a single parameter for the field at *field_index*.

    Raises IndexError (with .index set) for an unknown field index, and
    AttributeError (with .attribute set) for an unknown parameter name.
    """
    # Guard clauses: reject a bad field index first, then a bad name.
    if field_index not in self._field_indices:
        bad_index = IndexError('%s is not a valid field for control "%s".' % (str(field_index), self.name))
        bad_index.index = field_index
        raise bad_index
    if paramname not in Field.valid_params:
        bad_param = AttributeError('"%s".GetFieldParameter: invalid parameter "%s"' % (self.name, paramname))
        bad_param.attribute = paramname
        raise bad_param
    return self._fields[field_index]._GetParameter(paramname)
def _SetKeycodeHandler(self, keycode, func):
"""
This function adds and/or replaces key event handling functions
used by the control. <func> should take the event as argument
and return False if no further action on the key is necessary.
"""
if func:
self._keyhandlers[keycode] = func
elif self._keyhandlers.has_key(keycode):
del self._keyhandlers[keycode]
def _SetKeyHandler(self, char, func):
    """
    This function adds and/or replaces key event handling functions
    for ascii characters. <func> should take the event as argument
    and return False if no further action on the key is necessary.
    """
    # Convenience wrapper: converts the character via ord() and delegates
    # to _SetKeycodeHandler (a false func unbinds the key there).
    self._SetKeycodeHandler(ord(char), func)
def _AddNavKeycode(self, keycode, handler=None):
"""
This function allows a derived subclass to augment the list of
keycodes that are considered "navigational" keys.
"""
self._nav.append(keycode)
if handler:
self._keyhandlers[keycode] = handler
elif self.keyhandlers.has_key(keycode):
del self._keyhandlers[keycode]
def _AddNavKey(self, char, handler=None):
    """
    This function is a convenience function so you don't have to
    remember to call ord() for ascii chars to be used for navigation.
    """
    # Delegates to _AddNavKeycode with the character's keycode.
    self._AddNavKeycode(ord(char), handler)
def _GetNavKeycodes(self):
    """
    This function retrieves the current list of navigational keycodes for
    the control.

    Note: this returns the internal list object itself (not a copy), so
    mutations by the caller are visible to the control.
    """
    return self._nav
def _SetNavKeycodes(self, keycode_func_tuples):
"""
This function allows you to replace the current list of keycode processed
as navigation keys, and bind associated optional keyhandlers.
"""
self._nav = []
for keycode, func in keycode_func_tuples:
self._nav.append(keycode)
if func:
self._keyhandlers[keycode] = func
elif self.keyhandlers.has_key(keycode):
del self._keyhandlers[keycode]
def _processMask(self, mask):
    """
    Expand and classify the given mask string.

    Expands the c{n} repetition syntax, processes backslash-escaped
    special characters, and strips '|' explicit field-boundary markers.
    Returns a 3-tuple:
      (expanded mask string,
       dict mapping each position -> True if it is a mask character,
       list of positions that are explicit field boundaries).
    Also derives several numeric-mask attributes (_isFloat, _isInt,
    _signOk, _useParens, ...) as a side effect, and may pad the mask
    and default value with sign/paren placeholder positions.
    """
    # regular expression for parsing c{n} syntax:
    rex = re.compile('([' +string.join(maskchars,"") + '])\{(\d+)\}')
    s = mask
    match = rex.search(s)
    while match:                                            # found an(other) occurrence
        maskchr = s[match.start(1):match.end(1)]            # char to be repeated
        repcount = int(s[match.start(2):match.end(2)])      # the number of times
        replacement = string.join( maskchr * repcount, "")  # the resulting substr
        s = s[:match.start(1)] + replacement + s[match.end(2)+1:]  # account for trailing '}'
        match = rex.search(s)                               # look for another such entry in mask

    # Classify the (expanded) mask numerically; a control-level
    # validRegex overrides float/int treatment:
    self._decimalChar = self._ctrl_constraints._decimalChar
    self._shiftDecimalChar = self._ctrl_constraints._shiftDecimalChar
    self._isFloat = _isFloatingPoint(s) and not self._ctrl_constraints._validRegex
    self._isInt = _isInteger(s) and not self._ctrl_constraints._validRegex
    self._signOk = '-' in self._ctrl_constraints._formatcodes and (self._isFloat or self._isInt)
    self._useParens = self._ctrl_constraints._useParensForNegatives
    self._isNeg = False

    if self._signOk and s[0] != ' ':
        # reserve a leading position for the sign character, padding
        # the default value to match:
        s = ' ' + s
        if self._ctrl_constraints._defaultValue and self._ctrl_constraints._defaultValue[0] != ' ':
            self._ctrl_constraints._defaultValue = ' ' + self._ctrl_constraints._defaultValue
        self._signpos = 0

        if self._useParens:
            # also reserve a trailing position for the closing paren:
            s += ' '
            self._ctrl_constraints._defaultValue += ' '

    # Now, go build up a dictionary of booleans, indexed by position,
    # indicating whether or not a given position is masked or not.
    # Also, strip out any '|' chars, adjusting the mask as necessary,
    # marking the appropriate positions for field boundaries:
    ismasked = {}
    explicit_field_boundaries = []
    s = list(s)                     # work on a char list so we can delete in place
    i = 0
    while i < len(s):
        if s[i] == '\\':            # if escaped character:
            ismasked[i] = False     #     mark position as not a mask char
            if i+1 < len(s):        #     if another char follows...
                del s[i]            #         elide the '\'
                if s[i] == '\\':    #         if next char also a '\', char is a literal '\'
                    del s[i]        #             elide the 2nd '\' as well
            i += 1                  # increment to next char
        elif s[i] == '|':
            del s[i]                # elide the '|'
            explicit_field_boundaries.append(i)
            # keep index where it is:
        else:                       # else if special char, mark position accordingly
            ismasked[i] = s[i] in maskchars
            i += 1                  # increment to next char

    s = ''.join(s)
    return s, ismasked, explicit_field_boundaries
def _calcFieldExtents(self):
    """
    Establish/configure field instances with indices and editable
    extents appropriate to the specified mask, and build
    self._lookupField, the table mapping each mask position to the
    index of the field that owns it.

    Float and int masks get fixed 1- or 2-field layouts; any other
    mask is scanned for runs of mask characters (and explicit '|'
    boundaries) to discover its fields.
    Raises IndexError if a pre-configured field index has no
    corresponding position in the mask.
    """
    self._lookupField = {}
    if self._mask:

        ## Create dictionary of positions,characters in mask
        self.maskdict = {}
        for charnum in range( len( self._mask)):
            self.maskdict[charnum] = self._mask[charnum:charnum+1]

        # For the current mask, create an ordered list of field extents
        # and a dictionary of positions that map to field indices:

        # position 0 is reserved for the sign when signs are allowed:
        if self._signOk: start = 1
        else: start = 0

        if self._isFloat:
            # Skip field "discovery", and just construct a 2-field control with appropriate
            # constraints for a floating-point entry.

            # .setdefault always constructs 2nd argument even if not needed, so we do this
            # the old-fashioned way...
            if not self._fields.has_key(0):
                self._fields[0] = Field()
            if not self._fields.has_key(1):
                self._fields[1] = Field()

            self._decimalpos = string.find( self._mask, '.')
            formatcodes = self._fields[0]._GetParameter('formatcodes')
            # integer part is right-aligned:
            if 'R' not in formatcodes: formatcodes += 'R'
            self._fields[0]._SetParameters(index=0, extent=(start, self._decimalpos),
                                           mask=self._mask[start:self._decimalpos], formatcodes=formatcodes)
            end = len(self._mask)
            if self._signOk and self._useParens:
                end -= 1    # last position is the closing paren, not part of the field
            self._fields[1]._SetParameters(index=1, extent=(self._decimalpos+1, end),
                                           mask=self._mask[self._decimalpos+1:end])

            # positions up to and including the decimal point belong to
            # field 0; everything after it to field 1:
            for i in range(self._decimalpos+1):
                self._lookupField[i] = 0

            for i in range(self._decimalpos+1, len(self._mask)+1):
                self._lookupField[i] = 1

        elif self._isInt:
            # Skip field "discovery", and just construct a 1-field control with appropriate
            # constraints for a integer entry.
            if not self._fields.has_key(0):
                self._fields[0] = Field(index=0)
            end = len(self._mask)
            if self._signOk and self._useParens:
                end -= 1    # last position is the closing paren, not part of the field
            self._fields[0]._SetParameters(index=0, extent=(start, end),
                                           mask=self._mask[start:end])
            for i in range(len(self._mask)+1):
                self._lookupField[i] = 0

        else:
            # generic control; parse mask to figure out where the fields are:
            field_index = 0
            pos = 0
            i = self._findNextEntry(pos,adjustInsert=False)  # go to 1st entry point:
            if i < len(self._mask):   # no editable chars!
                # bind the leading template chars to the 1st field:
                for j in range(pos, i+1):
                    self._lookupField[j] = field_index
                pos = i       # figure out field for 1st editable space:

            while i <= len(self._mask):
                if self._isMaskChar(i):
                    edit_start = i
                    # Skip to end of editable part of current field:
                    while i < len(self._mask) and self._isMaskChar(i):
                        self._lookupField[i] = field_index
                        i += 1
                        # an explicit '|' boundary ends the field even
                        # mid-run of mask chars:
                        if i in self._explicit_field_boundaries:
                            break
                    edit_end = i
                    self._lookupField[i] = field_index
                    if not self._fields.has_key(field_index):
                        # construct the field with a full default parameter set:
                        kwargs = Field.valid_params.copy()
                        kwargs['index'] = field_index
                        kwargs['extent'] = (edit_start, edit_end)
                        kwargs['mask'] = self._mask[edit_start:edit_end]
                        self._fields[field_index] = Field(**kwargs)
                    else:
                        # field pre-configured by caller; just update its geometry:
                        self._fields[field_index]._SetParameters(
                            index=field_index,
                            extent=(edit_start, edit_end),
                            mask=self._mask[edit_start:edit_end])
                pos = i
                i = self._findNextEntry(pos, adjustInsert=False)  # go to next field:
                if i > pos:
                    # intervening template chars belong to the field just finished:
                    for j in range(pos, i+1):
                        self._lookupField[j] = field_index
                if i >= len(self._mask):
                    break           # if past end, we're done
                else:
                    field_index += 1

        # index -1 (the control-level constraints) sorts first, so
        # slicing it off leaves only the real field indices:
        indices = self._fields.keys()
        indices.sort()
        self._field_indices = indices[1:]

        # Verify that all field indices specified are valid for mask:
        for index in self._fields.keys():
            if index not in [-1] + self._lookupField.values():
                ie = IndexError('field %d is not a valid field for mask "%s"' % (index, self._mask))
                ie.index = index
                raise ie
def _calcTemplate(self, reset_fillchar, reset_default):
    """
    Process current fillchars and default values for the whole control
    and the individual fields, construct the resulting overall template
    (self._template plus per-field ._template strings), and adjust the
    control's current value as necessary when fillchars or the template
    changed.

    reset_fillchar/reset_default -- True when the corresponding
    control-level parameter was just reset, in which case field-level
    overrides are ignored.
    """
    default_set = False
    if self._ctrl_constraints._defaultValue:
        default_set = True
    else:
        # any field-level default also counts (unless defaults were reset):
        for field in self._fields.values():
            if field._defaultValue and not reset_default:
                default_set = True

    # Determine overall new template for control, and keep track of previous
    # values, so that current control value can be modified as appropriate:
    if self.controlInitialized: curvalue = list(self._GetValue())
    else: curvalue = None

    if hasattr(self, '_fillChar'): old_fillchars = self._fillChar
    else: old_fillchars = None

    if hasattr(self, '_template'): old_template = self._template
    else: old_template = None

    self._template = ""

    self._fillChar = {}     # per-position fill characters
    reset_value = False     # set True if the current value must be rewritten

    for field in self._fields.values():
        field._template = ""

    # Walk the mask position by position, choosing for each either the
    # sign blank, the decimal char, a fill/default char, or the literal
    # template char:
    for pos in range(len(self._mask)):
        field = self._FindField(pos)
        start, end = field._extent
        if pos == 0 and self._signOk:
            self._template = ' '    # always make the 1st position blank, regardless of fillchar
        elif self._isFloat and pos == self._decimalpos:
            self._template += self._decimalChar
        elif self._isMaskChar(pos):
            # field-level fillChar wins unless fillchars were just reset:
            if field._fillChar != self._ctrl_constraints._fillChar and not reset_fillchar:
                fillChar = field._fillChar
            else:
                fillChar = self._ctrl_constraints._fillChar
            self._fillChar[pos] = fillChar

            # Replace any current old fillchar with new one in current value;
            # if action required, set reset_value flag so we can take that action
            # after we're all done
            if self.controlInitialized and old_fillchars and old_fillchars.has_key(pos) and curvalue:
                if curvalue[pos] == old_fillchars[pos] and old_fillchars[pos] != fillChar:
                    reset_value = True
                    curvalue[pos] = fillChar

            if not field._defaultValue and not self._ctrl_constraints._defaultValue:
                # no default anywhere; use the fill char:
                self._template += fillChar
                field._template += fillChar
            elif field._defaultValue and not reset_default:
                # field-level default, indexed relative to the field start:
                if len(field._defaultValue) > pos-start:
                    self._template += field._defaultValue[pos-start]
                    field._template += field._defaultValue[pos-start]
                else:
                    # field default not long enough; using fillChar
                    self._template += fillChar
                    field._template += fillChar
            else:
                # control-level default, indexed by absolute position:
                if len(self._ctrl_constraints._defaultValue) > pos:
                    self._template += self._ctrl_constraints._defaultValue[pos]
                    field._template += self._ctrl_constraints._defaultValue[pos]
                else:
                    # ctrl default not long enough; using fillChar
                    self._template += fillChar
                    field._template += fillChar
        else:
            # literal template character, copied straight from the mask:
            self._template += self._mask[pos]

    self._fields[-1]._template = self._template     # (for consistency)

    if curvalue:    # had an old value, put new one back together
        newvalue = string.join(curvalue, "")
    else:
        newvalue = None

    if default_set:
        self._defaultValue = self._template
        if not self.IsEmpty(self._defaultValue) and not self.IsValid(self._defaultValue):
            ve = ValueError('Default value of "%s" is not a valid value for control "%s"' % (self._defaultValue, self.name))
            ve.value = self._defaultValue
            raise ve

        # if no fillchar change, but old value == old template, replace it:
        if newvalue == old_template:
            newvalue = self._template
            reset_value = True
    else:
        self._defaultValue = None

    if reset_value:
        # rewrite the control value, preserving cursor and selection:
        pos = self._GetInsertionPoint()
        sel_start, sel_to = self._GetSelection()
        self._SetValue(newvalue)
        self._SetInsertionPoint(pos)
        self._SetSelection(sel_start, sel_to)
def _propagateConstraints(self, **reset_args):
    """
    Propagate changes to control-level constraints and formatting to
    the individual fields as appropriate.

    With a single field, the control-level formatcodes and
    include/exclude character sets are copied wholesale; with multiple
    fields, control-level entries are merged into (not replacing) each
    field's own.  reset_args names the control-level parameters that
    were just reset and must therefore override field-level values.
    """
    parent_codes = self._ctrl_constraints._formatcodes
    parent_includes = self._ctrl_constraints._includeChars
    parent_excludes = self._ctrl_constraints._excludeChars
    for i in self._field_indices:
        field = self._fields[i]
        inherit_args = {}   # parameters to push down to this field
        if len(self._field_indices) == 1:
            # single field: it simply mirrors the control-level settings:
            inherit_args['formatcodes'] = parent_codes
            inherit_args['includeChars'] = parent_includes
            inherit_args['excludeChars'] = parent_excludes
        else:
            # merge control-level codes/chars into the field's own,
            # pushing down only if something actually changed:
            field_codes = current_codes = field._GetParameter('formatcodes')
            for c in parent_codes:
                if c not in field_codes: field_codes += c
            if field_codes != current_codes:
                inherit_args['formatcodes'] = field_codes

            include_chars = current_includes = field._GetParameter('includeChars')
            for c in parent_includes:
                if not c in include_chars: include_chars += c
            if include_chars != current_includes:
                inherit_args['includeChars'] = include_chars

            exclude_chars = current_excludes = field._GetParameter('excludeChars')
            for c in parent_excludes:
                if not c in exclude_chars: exclude_chars += c
            if exclude_chars != current_excludes:
                inherit_args['excludeChars'] = exclude_chars

        if reset_args.has_key('defaultValue') and reset_args['defaultValue']:
            inherit_args['defaultValue'] = ""   # (reset for field)

        # any other propagating parameter that was reset at the control
        # level is pushed down with the control's current value:
        for param in Field.propagating_params:
            if reset_args.has_key(param):
                inherit_args[param] = self.GetCtrlParameter(param)

        if inherit_args:
            field._SetParameters(**inherit_args)
            field._ValidateParameters(**inherit_args)
def _validateChoices(self):
    """
    Validate the choice lists configured for the fields.

    Ensures every choice for a given field would be a valid paste into
    that field's extent, and that it fits within the field.  Raises
    ValueError (with .value and .index attributes set) for any choice
    that cannot be entered or will not fit.

    Bug fix: the "will not fit" message previously omitted the '%'
    operator before its argument tuple, so that branch raised
    TypeError ('str' object is not callable) instead of the intended
    ValueError.
    """
    for field in self._fields.values():
        if field._choices:
            index = field._index
            if len(self._field_indices) == 1 and index == 0 and field._choices == self._ctrl_constraints._choices:
                # field 0 shares the control-level choices, which are
                # validated separately; skip the duplicate work:
                continue
            start, end = field._extent
            field_length = end - start
            for choice in field._choices:
                valid_paste, ignore, replace_to = self._validatePaste(choice, start, end)
                if not valid_paste:
                    ve = ValueError('"%s" could not be entered into field %d of control "%s"' % (choice, index, self.name))
                    ve.value = choice
                    ve.index = index
                    raise ve
                elif replace_to > end:
                    # fixed: '%' was missing here, making this raise TypeError:
                    ve = ValueError('"%s" will not fit into field %d of control "%s"' % (choice, index, self.name))
                    ve.value = choice
                    ve.index = index
                    raise ve
def _configure(self, mask, **reset_args):
"""
This function sets flags for automatic styling options. It is
called whenever a control or field-level parameter is set/changed.
This routine does the bulk of the interdependent parameter processing, determining
the field extents of the mask if changed, resetting parameters as appropriate,
determining the overall template value for the control, etc.
reset_args is supplied if called from control's .SetCtrlParameters()
routine, and indicates which if any parameters which can be
overridden by individual fields have been reset by request for the
whole control.
"""
## dbg(suspend=1)
## dbg('MaskedEditMixin::_configure("%s")' % mask, indent=1)
# Preprocess specified mask to expand {n} syntax, handle escaped
# mask characters, etc and build the resulting positionally keyed
# dictionary for which positions are mask vs. template characters:
self._mask, self._ismasked, self._explicit_field_boundaries = self._processMask(mask)
self._masklength = len(self._mask)
#### dbg('processed mask:', self._mask)
# Preserve original mask specified, for subsequent reprocessing
# if parameters change.
## dbg('mask: "%s"' % self._mask, 'previous mask: "%s"' % self._previous_mask)
self._previous_mask = mask # save unexpanded mask for next time
# Set expanded mask and extent of field -1 to width of entire control:
self._ctrl_constraints._SetParameters(mask = self._mask, extent=(0,self._masklength))
# Go parse mask to determine where each field is, construct field
# instances as necessary, configure them with those extents, and
# build lookup table mapping each position for control to its corresponding
# field.
#### dbg('calculating field extents')
self._calcFieldExtents()
# Go process defaultValues and fillchars to construct the overall
# template, and adjust the current value as necessary:
reset_fillchar = reset_args.has_key('fillChar') and reset_args['fillChar']
reset_default = reset_args.has_key('defaultValue') and reset_args['defaultValue']
#### dbg('calculating template')
self._calcTemplate(reset_fillchar, reset_default)
# Propagate control-level formatting and character constraints to each
# field if they don't already have them; if only one field, propagate
# control-level validation constraints to field as well:
#### dbg('propagating constraints')
self._propagateConstraints(**reset_args)
if self._isFloat and self._fields[0]._groupChar == self._decimalChar:
raise AttributeError('groupChar (%s) and decimalChar (%s) must be distinct.' %
(self._fields[0]._groupChar, self._decimalChar) )
#### dbg('fields:', indent=1)
## for i in [-1] + self._field_indices:
#### dbg('field %d:' % i, self._fields[i].__dict__)
#### dbg(indent=0)
# Set up special parameters for numeric control, if appropriate:
if self._signOk:
self._signpos = 0 # assume it starts here, but it will move around on floats
signkeys = ['-', '+', ' ']
if self._useParens:
signkeys += ['(', ')']
for key in signkeys:
keycode = ord(key)
if not self._keyhandlers.has_key(keycode):
self._SetKeyHandler(key, self._OnChangeSign)
elif self._isInt or self._isFloat:
signkeys = ['-', '+', ' ', '(', ')']
for key in signkeys:
keycode = ord(key)
if self._keyhandlers.has_key(keycode) and self._keyhandlers[keycode] == self._OnChangeSign:
self._SetKeyHandler(key, None)
if self._isFloat or self._isInt:
if self.controlInitialized:
value = self._GetValue()
#### dbg('value: "%s"' % value, 'len(value):', len(value),
## 'len(self._ctrl_constraints._mask):',len(self._ctrl_constraints._mask))
if len(value) < len(self._ctrl_constraints._mask):
newvalue = value
if self._useParens and len(newvalue) < len(self._ctrl_constraints._mask) and newvalue.find('(') == -1:
newvalue += ' '
if self._signOk and len(newvalue) < len(self._ctrl_constraints._mask) and newvalue.find(')') == -1:
newvalue = ' ' + newvalue
if len(newvalue) < len(self._ctrl_constraints._mask):
if self._ctrl_constraints._alignRight:
newvalue = newvalue.rjust(len(self._ctrl_constraints._mask))
else:
newvalue = newvalue.ljust(len(self._ctrl_constraints._mask))
## dbg('old value: "%s"' % value)
## dbg('new value: "%s"' % newvalue)
try:
self._ChangeValue(newvalue)
except Exception, e:
## dbg('exception raised:', e, 'resetting to initial value')
self._SetInitialValue()
elif len(value) > len(self._ctrl_constraints._mask):
newvalue = value
if not self._useParens and newvalue[-1] == ' ':
newvalue = newvalue[:-1]
if not self._signOk and len(newvalue) > len(self._ctrl_constraints._mask):
newvalue = newvalue[1:]
if not self._signOk:
newvalue, signpos, right_signpos = self._getSignedValue(newvalue)
## dbg('old value: "%s"' % value)
## dbg('new value: "%s"' % newvalue)
try:
self._ChangeValue(newvalue)
except Exception, e:
## dbg('exception raised:', e, 'resetting to initial value')
self._SetInitialValue()
elif not self._signOk and ('(' in value or '-' in value):
newvalue, signpos, right_signpos = self._getSignedValue(value)
## dbg('old value: "%s"' % value)
## dbg('new value: "%s"' % newvalue)
try:
self._ChangeValue(newvalue)
except e:
## dbg('exception raised:', e, 'resetting to initial value')
self._SetInitialValue()
# Replace up/down arrow default handling:
# make down act like tab, up act like shift-tab:
#### dbg('Registering numeric navigation and control handlers (if not already set)')
if not self._keyhandlers.has_key(wx.WXK_DOWN):
self._SetKeycodeHandler(wx.WXK_DOWN, self._OnChangeField)
if not self._keyhandlers.has_key(wx.WXK_NUMPAD_DOWN):
self._SetKeycodeHandler(wx.WXK_DOWN, self._OnChangeField)
if not self._keyhandlers.has_key(wx.WXK_UP):
self._SetKeycodeHandler(wx.WXK_UP, self._OnUpNumeric) # (adds "shift" to up arrow, and calls _OnChangeField)
if not self._keyhandlers.has_key(wx.WXK_NUMPAD_UP):
self._SetKeycodeHandler(wx.WXK_UP, self._OnUpNumeric) # (adds "shift" to up arrow, and calls _OnChangeField)
# On ., truncate contents right of cursor to decimal point (if any)
# leaves cursor after decimal point if floating point, otherwise at 0.
if not self._keyhandlers.has_key(ord(self._decimalChar)) or self._keyhandlers[ord(self._decimalChar)] != self._OnDecimalPoint:
self._SetKeyHandler(self._decimalChar, self._OnDecimalPoint)
if not self._keyhandlers.has_key(ord(self._shiftDecimalChar)) or self._keyhandlers[ord(self._shiftDecimalChar)] != self._OnChangeField:
self._SetKeyHandler(self._shiftDecimalChar, self._OnChangeField) # (Shift-'.' == '>' on US keyboards)
# Allow selective insert of groupchar in numbers:
if not self._keyhandlers.has_key(ord(self._fields[0]._groupChar)) or self._keyhandlers[ord(self._fields[0]._groupChar)] != self._OnGroupChar:
self._SetKeyHandler(self._fields[0]._groupChar, self._OnGroupChar)
## dbg(indent=0, suspend=0)
def _SetInitialValue(self, value=""):
    """
    Fill the control with the generated template (when <value> is
    empty) or with the supplied <value>, then apply value/type-specific
    formatting.

    The template path uses the "raw" _ChangeValue/_SetValue setters to
    bypass external validation (the template may not itself be a legal
    value); an explicit <value> goes through the public, validating
    setters instead.  Any exception from the setters is re-raised.
    """
    if not value:
        self._prevValue = self._curValue = self._template
        # don't apply external validation rules in this case, as template may
        # not coincide with "legal" value...
        try:
            if isinstance(self, wx.TextCtrl):
                self._ChangeValue(self._curValue)   # note the use of "raw" ._ChangeValue()...
            else:
                self._SetValue(self._curValue)      # note the use of "raw" ._SetValue()...
        except Exception, e:
            raise
    else:
        # Otherwise apply validation as appropriate to passed value:
        self._prevValue = self._curValue = value
        try:
            if isinstance(self, wx.TextCtrl):
                self.ChangeValue(value)             # use public (validating) .SetValue()
            else:
                self.SetValue(value)
        except Exception, e:
            raise

    # Set value/type-specific formatting
    self._applyFormatting()
def _calcSize(self, size=None):
    """
    Compute the automatic control size when autofitting is enabled and
    no explicit size was given; must be called after the base control
    is instantiated.  Returns the (possibly recomputed) size.
    """
    autosizable = size is None or size == wx.DefaultSize
    if autosizable and self._autofit:
        # measure a string of 'M's as wide as the mask:
        probe = 'M' * self._masklength
        if wx.Platform != "__WXMSW__":      # give it a little extra space
            probe += 'M'
        if wx.Platform == "__WXMAC__":      # give it even a little more...
            probe += 'M'
        width, height = self.GetTextExtent(probe)
        size = (width + 4, self.GetSize().height)
    return size
def _setFont(self):
    """
    Select and apply the control's font.

    Uses the system default GUI font unless a fixed-width font was
    requested, in which case the current font's size/weight/style are
    preserved but the face is switched to the teletype family.
    """
    if not self._useFixedWidthFont:
        self._font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
    else:
        font = self.GetFont()   # get size, weight, etc from current font
        points = font.GetPointSize()
        # mini-variant controls on Mac use a slightly smaller point size:
        if 'wxMac' in wx.PlatformInfo \
           and self.GetWindowVariant() == wx.WINDOW_VARIANT_MINI:
            points -= 1

        # Set to teletype font (guaranteed to be mappable to all wxWindows
        # platforms:
        self._font = wx.Font( points, wx.TELETYPE, font.GetStyle(),
                              font.GetWeight(), font.GetUnderlined())

    self.SetFont(self._font)
def _OnTextChange(self, event):
    """
    Handler for EVT_TEXT event.
    self._Change() is provided for subclasses, and may return False to
    skip this method logic. This function returns True if the event
    detected was a legitimate event, or False if it was a "bogus"
    EVT_TEXT event. (NOTE: There is currently an issue with calling
    .SetValue from within the EVT_CHAR handler that causes duplicate
    EVT_TEXT events for the same change.)
    """
    newvalue = self._GetValue()
    bValid = False
    if self._ignoreChange:  # ie. if an "intermediate text change event"
        return bValid

    ##! WS: For some inexplicable reason, every wx.TextCtrl.SetValue
    ## call is generating two (2) EVT_TEXT events. On certain platforms,
    ## (eg. linux/GTK) the 1st is an empty string value.
    ## This is the only mechanism I can find to mask this problem:
    if newvalue == self._curValue or len(newvalue) == 0:
        # ignore bogus text change event:
        pass
    else:
        if self._Change():
            # value no longer shows a sign char: clear the negative flag
            # and recompute the sign positions:
            if self._signOk and self._isNeg and newvalue.find('-') == -1 and newvalue.find('(') == -1:
                self._isNeg = False
                text, self._signpos, self._right_signpos = self._getSignedValue()
            self._CheckValid()  # Recolor control as appropriate
            event.Skip()
            bValid = True
        self._prevValue = self._curValue    # save for undo
        self._curValue = newvalue           # Save last seen value for next iteration

    return bValid
def _OnKeyDown(self, event):
    """
    EVT_KEY_DOWN handler: captures Ctrl+<navigation-key> combinations
    (e.g. Ctrl-tab) that never reach the "cooked" EVT_CHAR routine and
    routes them through _OnChar.  Every other key falls through to the
    regular EVT_CHAR processing via event.Skip().
    """
    keycode = event.GetKeyCode()
    if event.ControlDown() and keycode in self._nav:
        # this is the only place we will likely see these events;
        # process them now:
        self._OnChar(event)
    else:
        # allow regular EVT_CHAR key processing
        event.Skip()
def _OnChar(self, event):
    """
    This is the engine of MaskedEdit controls. It examines each keystroke,
    decides if it's allowed, where it should go or what action to take.

    Processing order: subclass _Keypress() veto; read-only/maskless
    passthrough; navigation/control keys; then printable characters,
    which are validated against the mask/field constraints and, when
    accepted, inserted via wx.CallAfter so the base control updates
    after this handler returns.
    """
    # Get keypress value, adjusted by control options (e.g. convert to upper etc)
    key = event.GetKeyCode()
    orig_pos = self._GetInsertionPoint()
    orig_value = self._GetValue()

    # give subclasses first crack; a False return vetoes the key:
    if not self._Keypress(key):
        return

    # If no format string for this control, or the control is marked as "read-only",
    # skip the rest of the special processing, and just "do the standard thing:"
    if not self._mask or not self._IsEditable():
        event.Skip()
        return

    # Process navigation and control keys first, with
    # position/selection unadulterated:
    if key in self._nav + self._control:
        if self._keyhandlers.has_key(key):
            keep_processing = self._keyhandlers[key](event)
            if self._GetValue() != orig_value:
                self.modified = True
            if not keep_processing:
                return
        self._applyFormatting()
        return

    # Else... adjust the position as necessary for next input key,
    # and determine resulting selection:
    pos = self._adjustPos( orig_pos, key )  # get insertion position, adjusted as needed
    sel_start, sel_to = self._GetSelection()  # check for a range of selected text

    keep_processing = True
    # Capture user past end of format field
    if pos > len(self.maskdict):
        keep_processing = False

    key = self._adjustKey(pos, key)     # apply formatting constraints to key:

    if self._keyhandlers.has_key(key):
        # there's an override for default behavior; use override function instead
        keep_processing = self._keyhandlers[key](event)
        if self._GetValue() != orig_value:
            self.modified = True
        if not keep_processing:
            return
        # else skip default processing, but do final formatting
    if key in wx_control_keycodes:
        event.Skip()    # non-printable; let base control handle it
        keep_processing = False
    else:
        field = self._FindField(pos)

        # Build the character to insert and the set of excluded chars,
        # decoding to unicode on unicode builds:
        if 'unicode' in wx.PlatformInfo:
            if key < 256:
                char = chr(key) # (must work if we got this far)
                char = char.decode(self._defaultEncoding)
            else:
                char = unichr(event.GetUnicodeKey())
            excludes = u''
            if type(field._excludeChars) != types.UnicodeType:
                excludes += field._excludeChars.decode(self._defaultEncoding)
            # NOTE(review): this tests the constraints OBJECT, not its
            # _excludeChars string, so the condition is always true and the
            # decode always happens; looks like it was meant to test
            # self._ctrl_constraints._excludeChars -- confirm before changing:
            if type(self._ctrl_constraints) != types.UnicodeType:
                excludes += self._ctrl_constraints._excludeChars.decode(self._defaultEncoding)
        else:
            char = chr(key) # (must work if we got this far)
            excludes = field._excludeChars + self._ctrl_constraints._excludeChars

        if chr(key) == ' ':
            # (originally a debug trace of field._okSpaces; no-op)
            pass

        if char in excludes:
            keep_processing = False

        if keep_processing and self._isCharAllowed( char, pos, checkRegex = True ):
            # insert key into candidate new value, but don't change control yet:
            oldstr = self._GetValue()
            newstr, newpos, new_select_to, match_field, match_index = self._insertKey(
                char, pos, sel_start, sel_to, self._GetValue(), allowAutoSelect = True)
            if self._ctrl_constraints._validRequired and not self.IsValid(newstr):
                # not valid; check whether the float-adjusted string is:
                keep_processing = False
                if self._isFloat and newstr != self._template:
                    newstr = self._adjustFloat(newstr)
                    if self.IsValid(newstr):
                        keep_processing = True
                        wx.CallAfter(self._SetInsertionPoint, self._decimalpos)
                if not keep_processing:
                    # key disallowed by validation:
                    if not wx.Validator_IsSilent() and orig_pos == pos:
                        wx.Bell()

            if keep_processing:
                unadjusted = newstr

                # special case: adjust date value as necessary:
                if self._isDate and newstr != self._template:
                    newstr = self._adjustDate(newstr)

                if newstr != orig_value:
                    self.modified = True
                wx.CallAfter(self._SetValue, newstr)

                # Adjust insertion point on date if just entered 2 digit year, and there are now 4 digits:
                if not self.IsDefault() and self._isDate and self._4digityear:
                    year2dig = self._dateExtent - 2
                    if pos == year2dig and unadjusted[year2dig] != newstr[year2dig]:
                        newpos = pos+2

                wx.CallAfter(self._SetInsertionPoint, newpos)

                if match_field is not None:
                    # a choice was matched during insert; auto-select it:
                    self._OnAutoSelect(match_field, match_index)

                if new_select_to != newpos:
                    wx.CallAfter(self._SetSelection, newpos, new_select_to)
                else:
                    # entering a new field that wants select-on-entry?
                    newfield = self._FindField(newpos)
                    if newfield != field and newfield._selectOnFieldEntry:
                        wx.CallAfter(self._SetInsertionPoint, newfield._extent[0])
                        wx.CallAfter(self._SetSelection, newfield._extent[0], newfield._extent[1])
                    else:
                        wx.CallAfter(self._SetSelection, newpos, new_select_to)
                keep_processing = False
        elif keep_processing:
            # char not allowed:
            keep_processing = False
            if (not wx.Validator_IsSilent()) and orig_pos == pos:
                wx.Bell()

    self._applyFormatting()

    # Move to next insertion point
    if keep_processing and key not in self._nav:
        pos = self._GetInsertionPoint()
        next_entry = self._findNextEntry( pos )
        if pos != next_entry:
            wx.CallAfter(self._SetInsertionPoint, next_entry )

        if self._isTemplateChar(pos):
            self._AdjustField(pos)
def _FindFieldExtent(self, pos=None, getslice=False, value=None):
    """
    Return the editable extent (start, end) of the field containing
    position <pos> and, when <getslice> is True, also the current
    contents of that extent (taken from <value>, or from the control
    when <value> is None).  Returns None extents when no field applies.

    Template chars are bound to the preceding field.
    For masks beginning with template chars, these chars are ignored
    when calculating the current field.

    Eg: with template (###) ###-####,
    >>> self._FindFieldExtent(pos=0)
    1, 4
    >>> self._FindFieldExtent(pos=1)
    1, 4
    >>> self._FindFieldExtent(pos=5)
    1, 4
    >>> self._FindFieldExtent(pos=6)
    6, 9
    >>> self._FindFieldExtent(pos=10)
    10, 14
    etc.
    """
    field = self._FindField(pos)
    if not field:
        if getslice:
            return None, None, ""
        return None, None

    edit_start, edit_end = field._extent
    if not getslice:
        return edit_start, edit_end

    if value is None:
        value = self._GetValue()
    contents = value[edit_start:edit_end]
    return edit_start, edit_end, contents
def _FindField(self, pos=None):
    """
    Return the field instance containing position *pos* (default: the
    current insertion point).  Template chars bind to the preceding
    field; template chars before the first field are ignored.

    Raises IndexError when pos lies outside the control; returns None
    when the mask defines no fields at all.
    """
    if pos is None:
        pos = self._GetInsertionPoint()
    elif not (0 <= pos <= self._masklength):
        raise IndexError('position %s out of range of control' % str(pos))
    if not self._fields:
        return None
    return self._fields[self._lookupField[pos]]
def ClearValue(self):
    """Replace the control's contents with the default (template) value,
    home the cursor and repaint."""
    self._SetValue(self._template)
    self._SetInsertionPoint(0)
    self.Refresh()
def ClearValueAlt(self):
    """Replace the control's contents with the default (template) value
    without firing a change event (uses _ChangeValue rather than
    _SetValue), home the cursor and repaint."""
    self._ChangeValue(self._template)
    self._SetInsertionPoint(0)
    self.Refresh()
def _baseCtrlEventHandler(self, event):
    """
    Fallback handler for keys that should be processed by the base
    control rather than the masked-edit machinery: pass the event on
    and report that masked processing is finished.
    """
    event.Skip()  # let the underlying control handle the key
    return False
def _OnUpNumeric(self, event):
    """
    Make the up-arrow behave like shift-tab (jump to the start of the
    previous field) by faking the shift modifier on the event before
    delegating to _OnChangeField.
    """
    event.m_shiftDown = 1  # simulate a shifted field-change key
    self._OnChangeField(event)
def _OnArrow(self, event):
    """
    Used in response to left/right navigation keys; makes these actions skip
    over mask template chars.

    Also maps shifted up/down arrows onto forward-tab field navigation,
    and, for fields with _selectOnFieldEntry, converts arrows at field
    boundaries into "change field" events so the target field gets
    selected.  Returns False when no further key processing should occur.
    """
    ## dbg("MaskedEditMixin::_OnArrow", indent=1)
    pos = self._GetInsertionPoint()
    keycode = event.GetKeyCode()
    sel_start, sel_to = self._GetSelection()
    entry_end = self._goEnd(getPosOnly=True)
    if keycode in (wx.WXK_RIGHT, wx.WXK_DOWN, wx.WXK_NUMPAD_RIGHT, wx.WXK_NUMPAD_DOWN):
        # moving right: refuse to advance past the end of current input
        if( ( not self._isTemplateChar(pos) and pos+1 > entry_end)
            or ( self._isTemplateChar(pos) and pos >= entry_end) ):
            ## dbg("can't advance", indent=0)
            return False
        elif self._isTemplateChar(pos):
            self._AdjustField(pos)
    elif keycode in (wx.WXK_LEFT, wx.WXK_UP, wx.WXK_NUMPAD_LEFT, wx.WXK_NUMPAD_UP) and sel_start == sel_to and pos > 0 and self._isTemplateChar(pos-1):
        ## dbg('adjusting field')
        # moving left off a template char: adjust/validate the field being left
        self._AdjustField(pos)

    # treat as shifted up/down arrows as tab/reverse tab:
    if event.ShiftDown() and keycode in (wx.WXK_UP, wx.WXK_DOWN, wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_DOWN):
        # remove "shifting" and treat as (forward) tab:
        event.m_shiftDown = False
        keep_processing = self._OnChangeField(event)
    elif self._FindField(pos)._selectOnFieldEntry:
        if( keycode in (wx.WXK_UP, wx.WXK_LEFT, wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_LEFT)
            and sel_start != 0
            and self._isTemplateChar(sel_start-1)
            and sel_start != self._masklength
            and not self._signOk and not self._useParens):
            # call _OnChangeField to handle "ctrl-shifted event"
            # (which moves to previous field and selects it.)
            event.m_shiftDown = True
            event.m_ControlDown = True
            keep_processing = self._OnChangeField(event)
        elif( keycode in (wx.WXK_DOWN, wx.WXK_RIGHT, wx.WXK_NUMPAD_DOWN, wx.WXK_NUMPAD_RIGHT)
              and sel_to != self._masklength
              and self._isTemplateChar(sel_to)):
            # when changing field to the right, ensure don't accidentally go left instead
            event.m_shiftDown = False
            keep_processing = self._OnChangeField(event)
        else:
            # treat arrows as normal, allowing selection
            # as appropriate:
            ## dbg('using base ctrl event processing')
            event.Skip()
    else:
        if( (sel_to == self._fields[0]._extent[0] and keycode in (wx.WXK_LEFT, wx.WXK_NUMPAD_LEFT) )
            or (sel_to == self._masklength and keycode in (wx.WXK_RIGHT, wx.WXK_NUMPAD_RIGHT) ) ):
            # at the hard edges of the control; just beep
            if not wx.Validator_IsSilent():
                wx.Bell()
        else:
            # treat arrows as normal, allowing selection
            # as appropriate:
            ## dbg('using base event processing')
            event.Skip()
    # NOTE(review): keep_processing assigned above is deliberately
    # discarded here -- the handler always reports "done".
    keep_processing = False
    ## dbg(indent=0)
    return keep_processing
def _OnCtrl_S(self, event):
    """ Default Ctrl-S handler; prints value information if demo enabled.

    Dumps GetValue()/GetPlainValue(), validity, and the current field's
    extent/contents to stdout (Python 2 print statements).  Returns False
    so the keystroke gets no further processing.
    """
    ## dbg("MaskedEditMixin::_OnCtrl_S")
    if self._demo:
        print 'MaskedEditMixin.GetValue() = "%s"\nMaskedEditMixin.GetPlainValue() = "%s"' % (self.GetValue(), self.GetPlainValue())
        print "Valid? => " + str(self.IsValid())
        print "Current field, start, end, value =", str( self._FindFieldExtent(getslice=True))
    return False
def _OnCtrl_X(self, event=None):
    """
    Handle ctrl-X and the context menu's Cut operation by delegating to
    self.Cut().  Returns False so the key receives no further processing.
    """
    self.Cut()
    return False
def _OnCtrl_C(self, event=None):
    """
    Handle ctrl-C and the context menu's Copy operation using the base
    control's Copy().  Returns False so the key receives no further
    processing.
    """
    self.Copy()
    return False
def _OnCtrl_V(self, event=None):
    """
    Handle ctrl-V and the context menu's Paste operation by delegating
    to self.Paste().  Returns False so the key receives no further
    processing.
    """
    self.Paste()
    return False
def _OnInsert(self, event=None):
    """
    Map shift-insert to Paste and ctrl-insert to Copy; any other
    invocation is ignored.  Returns False to suppress further
    processing.
    """
    if event is not None and isinstance(event, wx.KeyEvent):
        if event.ShiftDown():
            self.Paste()
        elif event.ControlDown():
            self.Copy()
        # unmodified insert: nothing to do
    return False
def _OnDelete(self, event=None):
    """
    Map shift-delete to Cut; a plain delete keypress (or a programmatic
    call with no key event) erases instead.  Returns False to suppress
    further processing.
    """
    is_key_event = event is not None and isinstance(event, wx.KeyEvent)
    if is_key_event and event.ShiftDown():
        self.Cut()
    else:
        self._OnErase(event)
    return False
def _OnCtrl_Z(self, event=None):
    """
    Handle ctrl-Z and the context menu's Undo operation by delegating
    to self.Undo().  Returns False so the key receives no further
    processing.
    """
    self.Undo()
    return False
def _OnCtrl_A(self, event=None):
    """
    Handle ctrl-A (select all).  Shift-ctrl-A — or a programmatic call
    without an event — selects the entire mask; a plain ctrl-A selects
    only up to the end of the current input.  Returns False to suppress
    further processing.
    """
    end_of_input = self._goEnd(getPosOnly=True)
    select_all = (not event) or (isinstance(event, wx.KeyEvent) and event.ShiftDown())
    sel_end = self._masklength if select_all else end_of_input
    wx.CallAfter(self._SetInsertionPoint, 0)
    wx.CallAfter(self._SetSelection, 0, sel_end)
    return False
def _OnErase(self, event=None, just_return_value=False):
    """ Handles backspace and delete keypress in control. Should return False to skip other processing.

    Called with event=None when used as the action routine of a Cut()
    operation (treated as a Delete).  When just_return_value is True, the
    string that would result from the erasure is returned and no GUI
    update is queued.  Right-insert fields are erased "from the right"
    (contents shift right, padding fills on the left); sign characters
    ('-' or parentheses) get special handling so the two paren markers
    stay balanced.
    """
    ## dbg("MaskedEditMixin::_OnErase", indent=1)
    sel_start, sel_to = self._GetSelection()                   ## check for a range of selected text
    if event is None:   # called as action routine from Cut() operation.
        key = wx.WXK_DELETE
    else:
        key = event.GetKeyCode()
    field = self._FindField(sel_to)
    start, end = field._extent
    value = self._GetValue()
    oldstart = sel_start

    # If trying to erase beyond "legal" bounds, disallow operation:
    if( (sel_to == 0 and key == wx.WXK_BACK)
        or (self._signOk and sel_to == 1 and value[0] == ' ' and key == wx.WXK_BACK)
        or (sel_to == self._masklength and sel_start == sel_to and key in (wx.WXK_DELETE, wx.WXK_NUMPAD_DELETE) and not field._insertRight)
        or (self._signOk and self._useParens
            and sel_start == sel_to
            and sel_to == self._masklength - 1
            and value[sel_to] == ' ' and key in (wx.WXK_DELETE, wx.WXK_NUMPAD_DELETE) and not field._insertRight) ):
        if not wx.Validator_IsSilent():
            wx.Bell()
        ## dbg(indent=0)
        return False

    # Case 1: deleting at/near the right edge of a non-empty insert-right field.
    if( field._insertRight                                  # an insert-right field
        and value[start:end] != self._template[start:end]   # and field not empty
        and sel_start >= start                              # and selection starts in field
        and ((sel_to == sel_start                           # and no selection
              and sel_to == end                             # and cursor at right edge
              and key in (wx.WXK_BACK, wx.WXK_DELETE, wx.WXK_NUMPAD_DELETE)) # and either delete or backspace key
             or                                             # or
             (key == wx.WXK_BACK                            # backspacing
              and (sel_to == end                            # and selection ends at right edge
                   or sel_to < end and field._allowInsert)) ) ):  # or allow right insert at any point in field
        ## dbg('delete left')
        # if backspace but left of cursor is empty, adjust cursor right before deleting
        while( key == wx.WXK_BACK
               and sel_start == sel_to
               and sel_start < end
               and value[start:sel_start] == self._template[start:sel_start]):
            sel_start += 1
            sel_to = sel_start
        ## dbg('sel_start, start:', sel_start, start)
        if sel_start == sel_to:
            keep = sel_start -1
        else:
            keep = sel_start
        newfield = value[start:keep] + value[sel_to:end]
        # handle sign char moving from outside field into the field:
        move_sign_into_field = False
        if not field._padZero and self._signOk and self._isNeg and value[0] in ('-', '('):
            signchar = value[0]
            newfield = signchar + newfield
            move_sign_into_field = True
        ## dbg('cut newfield: "%s"' % newfield)
        # handle what should fill in from the left:
        left = ""
        for i in range(start, end - len(newfield)):
            if field._padZero:
                left += '0'
            elif( self._signOk and self._isNeg and i == 1
                  and ((self._useParens and newfield.find('(') == -1)
                       or (not self._useParens and newfield.find('-') == -1)) ):
                left += ' '
            else:
                left += self._template[i]   # this can produce strange results in combination with default values...
        newfield = left + newfield
        ## dbg('filled newfield: "%s"' % newfield)
        newstr = value[:start] + newfield + value[end:]
        # (handle sign located in "mask position" in front of field prior to delete)
        if move_sign_into_field:
            newstr = ' ' + newstr[1:]
        pos = sel_to
    else:
        # Case 2: ordinary (left-to-right) erasure.
        # handle erasure of (left) sign, moving selection accordingly...
        if self._signOk and sel_start == 0:
            newstr = value = ' ' + value[1:]
            sel_start += 1

        if field._allowInsert and sel_start >= start:
            # selection (if any) falls within current insert-capable field:
            select_len = sel_to - sel_start
            # determine where cursor should end up:
            if key == wx.WXK_BACK:
                if select_len == 0:
                    newpos = sel_start -1
                else:
                    newpos = sel_start
                erase_to = sel_to
            else:
                newpos = sel_start
                if sel_to == sel_start:
                    erase_to = sel_to + 1
                else:
                    erase_to = sel_to

            if self._isTemplateChar(newpos) and select_len == 0:
                if self._signOk:
                    if value[newpos] in ('(', '-'):
                        newpos += 1     # don't move cusor
                        newstr = ' ' + value[newpos:]
                    elif value[newpos] == ')':
                        # erase right sign, but don't move cursor; (matching left sign handled later)
                        newstr = value[:newpos] + ' '
                    else:
                        # no deletion; just move cursor
                        newstr = value
                else:
                    # no deletion; just move cursor
                    newstr = value
            else:
                if erase_to > end: erase_to = end
                erase_len = erase_to - newpos
                left = value[start:newpos]
                ## dbg("retained ='%s'" % value[erase_to:end], 'sel_to:', sel_to, "fill: '%s'" % self._template[end - erase_len:end])
                right = value[erase_to:end] + self._template[end-erase_len:end]
                pos_adjust = 0
                if field._alignRight:
                    rstripped = right.rstrip()
                    if rstripped != right:
                        pos_adjust = len(right) - len(rstripped)
                        right = rstripped

                if not field._insertRight and value[-1] == ')' and end == self._masklength - 1:
                    # need to shift ) into the field:
                    right = right[:-1] + ')'
                    value = value[:-1] + ' '

                newfield = left+right
                if pos_adjust:
                    newfield = newfield.rjust(end-start)
                    newpos += pos_adjust
                ## dbg("left='%s', right ='%s', newfield='%s'" %(left, right, newfield))
                newstr = value[:start] + newfield + value[end:]
            pos = newpos
        else:
            if sel_start == sel_to:
                ## dbg("current sel_start, sel_to:", sel_start, sel_to)
                if key == wx.WXK_BACK:
                    sel_start, sel_to = sel_to-1, sel_to-1
                ## dbg("new sel_start, sel_to:", sel_start, sel_to)
                if field._padZero and not value[start:sel_to].replace('0', '').replace(' ','').replace(field._fillChar, ''):
                    # preceding chars (if any) are zeros, blanks or fillchar; new char should be 0:
                    newchar = '0'
                else:
                    newchar = self._template[sel_to]    ## get an original template character to "clear" the current char
                ## dbg('value = "%s"' % value, 'value[%d] = "%s"' %(sel_start, value[sel_start]))
                if self._isTemplateChar(sel_to):
                    if sel_to == 0 and self._signOk and value[sel_to] == '-':   # erasing "template" sign char
                        newstr = ' ' + value[1:]
                        sel_to += 1
                    elif self._signOk and self._useParens and (value[sel_to] == ')' or value[sel_to] == '('):
                        # allow "change sign" by removing both parens:
                        newstr = value[:self._signpos] + ' ' + value[self._signpos+1:-1] + ' '
                    else:
                        newstr = value
                    newpos = sel_to
                else:
                    if field._insertRight and sel_start == sel_to:
                        # force non-insert-right behavior, by selecting char to be replaced:
                        sel_to += 1
                    newstr, ignore = self._insertKey(newchar, sel_start, sel_start, sel_to, value)
            else:
                # selection made
                newstr = self._eraseSelection(value, sel_start, sel_to)
            pos = sel_start # put cursor back at beginning of selection

    if self._signOk and self._useParens:
        # account for resultant unbalanced parentheses:
        left_signpos = newstr.find('(')
        right_signpos = newstr.find(')')
        if left_signpos == -1 and right_signpos != -1:
            # erased left-sign marker; get rid of right sign marker:
            newstr = newstr[:right_signpos] + ' ' + newstr[right_signpos+1:]
        elif left_signpos != -1 and right_signpos == -1:
            # erased right-sign marker; get rid of left-sign marker:
            newstr = newstr[:left_signpos] + ' ' + newstr[left_signpos+1:]

    ## dbg("oldstr:'%s'" % value, 'oldpos:', oldstart)
    ## dbg("newstr:'%s'" % newstr, 'pos:', pos)
    # if erasure results in an invalid field, disallow it:
    ## dbg('field._validRequired?', field._validRequired)
    ## dbg('field.IsValid("%s")?' % newstr[start:end], field.IsValid(newstr[start:end]))
    if field._validRequired and not field.IsValid(newstr[start:end]):
        if not wx.Validator_IsSilent():
            wx.Bell()
        ## dbg(indent=0)
        return False
    # if erasure results in an invalid value, disallow it:
    if self._ctrl_constraints._validRequired and not self.IsValid(newstr):
        if not wx.Validator_IsSilent():
            wx.Bell()
        ## dbg(indent=0)
        return False

    if just_return_value:
        ## dbg(indent=0)
        return newstr
    # else...
    ## dbg('setting value (later) to', newstr)
    wx.CallAfter(self._SetValue, newstr)
    ## dbg('setting insertion point (later) to', pos)
    wx.CallAfter(self._SetInsertionPoint, pos)
    ## dbg(indent=0)
    if newstr != value:
        self.modified = True
    return False
def _OnEnd(self,event):
    """ Handles End keypress in control. Should return False to skip other processing.

    Plain End goes to the end of the control (minus the reserved right
    paren slot when parenthesized signs are in use).  Ctrl-End goes to
    the end of current input, or — when already there or in a different
    field — to the end of the current/next field.  Adding Shift extends
    the selection to that point instead of just moving the cursor.
    """
    ## dbg("MaskedEditMixin::_OnEnd", indent=1)
    pos = self._adjustPos(self._GetInsertionPoint(), event.GetKeyCode())
    if not event.ControlDown():
        end = self._masklength  # go to end of control
        if self._signOk and self._useParens:
            end = end - 1       # account for reserved char at end
    else:
        end_of_input = self._goEnd(getPosOnly=True)
        sel_start, sel_to = self._GetSelection()
        if sel_to < pos: sel_to = pos
        field = self._FindField(sel_to)
        field_end = self._FindField(end_of_input)

        # pick different end point if either:
        # - cursor not in same field
        # - or at or past last input already
        # - or current selection = end of current field:
        #### dbg('field != field_end?', field != field_end)
        #### dbg('sel_to >= end_of_input?', sel_to >= end_of_input)
        if field != field_end or sel_to >= end_of_input:
            edit_start, edit_end = field._extent
            #### dbg('edit_end:', edit_end)
            #### dbg('sel_to:', sel_to)
            #### dbg('sel_to == edit_end?', sel_to == edit_end)
            #### dbg('field._index < self._field_indices[-1]?', field._index < self._field_indices[-1])
            if sel_to == edit_end and field._index < self._field_indices[-1]:
                edit_start, edit_end = self._FindFieldExtent(self._findNextEntry(edit_end)) # go to end of next field:
                end = edit_end
                ## dbg('end moved to', end)
            elif sel_to == edit_end and field._index == self._field_indices[-1]:
                # already at edit end of last field; select to end of control:
                end = self._masklength
                ## dbg('end moved to', end)
            else:
                end = edit_end  # select to end of current field
                ## dbg('end moved to ', end)
        else:
            # select to current end of input
            end = end_of_input

    #### dbg('pos:', pos, 'end:', end)
    if event.ShiftDown():
        # extend the selection from the current position to the chosen end
        if not event.ControlDown():
            ## dbg("shift-end; select to end of control")
            pass
        else:
            ## dbg("shift-ctrl-end; select to end of non-whitespace")
            pass
        wx.CallAfter(self._SetInsertionPoint, pos)
        wx.CallAfter(self._SetSelection, pos, end)
    else:
        # just move the cursor (empty selection) to the chosen end
        if not event.ControlDown():
            ## dbg('go to end of control:')
            pass
        wx.CallAfter(self._SetInsertionPoint, end)
        wx.CallAfter(self._SetSelection, end, end)
    ## dbg(indent=0)
    return False
def _OnReturn(self, event):
    """
    Masked controls are single-line by definition, so the Return key is
    swallowed and a forward Navigate event is issued in its place.
    """
    self.Navigate(True)
    return False
def _OnHome(self, event):
    """ Handles Home keypress in control. Should return False to skip other processing.

    Five cases, depending on modifiers and current selection:
      1) shift:                   select from start of control to end of
                                  the current selection;
      2) no modifiers:            move cursor to start of control;
      3) ctrl:                    move to start of current field, or of
                                  the previous field if already there;
      4) ctrl+shift, selection
         not anchored at 0:       like (3), but extend the selection;
      5) ctrl+shift, selection
         anchored at 0:           shrink the selection back to the
                                  start of the current (or previous) field.

    Fix: the old implementation set an `end_of_field` local in several
    branches that was never read anywhere; the dead assignments have been
    removed (no behavioral change).
    """
    ## dbg("MaskedEditMixin::_OnHome", indent=1)
    pos = self._adjustPos(self._GetInsertionPoint(), event.GetKeyCode())
    sel_start, sel_to = self._GetSelection()

    # 1) shift: select from start of control to end of current selection.
    if event.ShiftDown() and not event.ControlDown():
        ## dbg("shift-home; select to start of control")
        start = 0
        end = sel_start
    # 2) no shift, no control: move cursor to beginning of control.
    elif not event.ControlDown():
        ## dbg("home; move to start of control")
        start = 0
        end = 0
    # 3) No shift, control: move cursor back to beginning of field; if
    #    there already, go to beginning of previous field.
    # 4) shift, control, start of selection not at beginning of control:
    #    move sel_start back to start of field; if already there, go to
    #    start of previous field.
    elif( event.ControlDown()
          and (not event.ShiftDown()
               or (event.ShiftDown() and sel_start > 0) ) ):
        if len(self._field_indices) > 1:
            field = self._FindField(sel_start)
            start, ignore = field._extent
            if sel_start == start and field._index != self._field_indices[0]:   # go to start of previous field:
                start, ignore = self._FindFieldExtent(sel_start-1)
            elif sel_start == start:
                start = 0   # go to literal beginning if edit start
                            # not at that point
        else:
            start = 0

        if not event.ShiftDown():
            ## dbg("ctrl-home; move to beginning of field")
            end = start
        else:
            ## dbg("shift-ctrl-home; select to beginning of field")
            end = sel_to
    else:
        # 5) shift, control, start of selection at beginning of control:
        #    unselect by moving sel_to backward to beginning of current field;
        #    if already there, move to start of previous field.
        start = sel_start
        if len(self._field_indices) > 1:
            # find end of previous field:
            field = self._FindField(sel_to)
            if sel_to > start and field._index != self._field_indices[0]:
                ignore, end = self._FindFieldExtent(field._extent[0]-1)
            else:
                end = start
        else:
            end = start
        ## dbg("shift-ctrl-home; unselect to beginning of field")

    ## dbg('queuing new sel_start, sel_to:', (start, end))
    wx.CallAfter(self._SetInsertionPoint, start)
    wx.CallAfter(self._SetSelection, start, end)
    ## dbg(indent=0)
    return False
def _OnChangeField(self, event):
    """
    Primarily handles TAB events, but can be used for any key that
    designer wants to change fields within a masked edit control.

    Shift moves backward, ctrl selects to the field boundary instead of
    moving, and fields flagged _selectOnFieldEntry are selected on
    arrival.  When leaving the first/last field with a real TAB key, an
    explicit Navigate() is issued (required since wx 2.5.2 when
    wx.TE_PROCESS_TAB is in use).  Field changes are refused when the
    current field is invalid and flags _stopFieldChangeIfInvalid.
    Always returns False so the key gets no further processing.
    """
    ## dbg('MaskedEditMixin::_OnChangeField', indent = 1)
    # determine end of current field:
    pos = self._GetInsertionPoint()
    ## dbg('current pos:', pos)
    sel_start, sel_to = self._GetSelection()

    if self._masklength < 0:   # no fields; process tab normally
        self._AdjustField(pos)
        if event.GetKeyCode() == wx.WXK_TAB:
            ## dbg('tab to next ctrl')
            # As of 2.5.2, you don't call event.Skip() to do
            # this, but instead force explicit navigation, if
            # wx.TE_PROCESS_TAB is used (like in the masked edits)
            self.Navigate(True)
        #else: do nothing
        ## dbg(indent=0)
        return False

    field = self._FindField(sel_to)
    index = field._index
    field_start, field_end = field._extent
    slice = self._GetValue()[field_start:field_end]

    ## dbg('field._stopFieldChangeIfInvalid?', field._stopFieldChangeIfInvalid)
    ## dbg('field.IsValid(slice)?', field.IsValid(slice))

    if field._stopFieldChangeIfInvalid and not field.IsValid(slice):
        ## dbg('field invalid; field change disallowed')
        if not wx.Validator_IsSilent():
            wx.Bell()
        ## dbg(indent=0)
        return False

    if event.ShiftDown():
        # "Go backward"
        # NOTE: doesn't yet work with SHIFT-tab under wx; the control
        # never sees this event! (But I've coded for it should it ever work,
        # and it *does* work for '.' in IpAddrCtrl.)
        if pos < field_start:
            ## dbg('cursor before 1st field; cannot change to a previous field')
            if not wx.Validator_IsSilent():
                wx.Bell()
            ## dbg(indent=0)
            return False

        if event.ControlDown():
            ## dbg('queuing select to beginning of field:', field_start, pos)
            wx.CallAfter(self._SetInsertionPoint, field_start)
            wx.CallAfter(self._SetSelection, field_start, pos)
            ## dbg(indent=0)
            return False

        elif index == 0:
            # We're already in the 1st field; process shift-tab normally:
            self._AdjustField(pos)
            if event.GetKeyCode() == wx.WXK_TAB:
                ## dbg('tab to previous ctrl')
                # As of 2.5.2, you don't call event.Skip() to do
                # this, but instead force explicit navigation, if
                # wx.TE_PROCESS_TAB is used (like in the masked edits)
                self.Navigate(False)
            else:
                ## dbg('position at beginning')
                wx.CallAfter(self._SetInsertionPoint, field_start)
            ## dbg(indent=0)
            return False
        else:
            # find beginning of previous field:
            begin_prev = self._FindField(field_start-1)._extent[0]
            self._AdjustField(pos)
            ## dbg('repositioning to', begin_prev)
            wx.CallAfter(self._SetInsertionPoint, begin_prev)
            if self._FindField(begin_prev)._selectOnFieldEntry:
                edit_start, edit_end = self._FindFieldExtent(begin_prev)
                ## dbg('queuing selection to (%d, %d)' % (edit_start, edit_end))
                wx.CallAfter(self._SetInsertionPoint, edit_start)
                wx.CallAfter(self._SetSelection, edit_start, edit_end)
            ## dbg(indent=0)
            return False
    else:
        # "Go forward"
        if event.ControlDown():
            ## dbg('queuing select to end of field:', pos, field_end)
            wx.CallAfter(self._SetInsertionPoint, pos)
            wx.CallAfter(self._SetSelection, pos, field_end)
            ## dbg(indent=0)
            return False
        else:
            if pos < field_start:
                ## dbg('cursor before 1st field; go to start of field')
                wx.CallAfter(self._SetInsertionPoint, field_start)
                if field._selectOnFieldEntry:
                    # reversed (end, start) selection leaves cursor at field start
                    wx.CallAfter(self._SetSelection, field_end, field_start)
                else:
                    wx.CallAfter(self._SetSelection, field_start, field_start)
                return False
            # else...
            ## dbg('end of current field:', field_end)
            ## dbg('go to next field')
            if field_end == self._fields[self._field_indices[-1]]._extent[1]:
                self._AdjustField(pos)
                if event.GetKeyCode() == wx.WXK_TAB:
                    ## dbg('tab to next ctrl')
                    # As of 2.5.2, you don't call event.Skip() to do
                    # this, but instead force explicit navigation, if
                    # wx.TE_PROCESS_TAB is used (like in the masked edits)
                    self.Navigate(True)
                else:
                    ## dbg('position at end')
                    wx.CallAfter(self._SetInsertionPoint, field_end)
                ## dbg(indent=0)
                return False
            else:
                # we have to find the start of the next field
                next_pos = self._findNextEntry(field_end)
                if next_pos == field_end:
                    ## dbg('already in last field')
                    self._AdjustField(pos)
                    if event.GetKeyCode() == wx.WXK_TAB:
                        ## dbg('tab to next ctrl')
                        # As of 2.5.2, you don't call event.Skip() to do
                        # this, but instead force explicit navigation, if
                        # wx.TE_PROCESS_TAB is used (like in the masked edits)
                        self.Navigate(True)
                    #else: do nothing
                    ## dbg(indent=0)
                    return False
                else:
                    self._AdjustField( pos )
                    # move cursor to appropriate point in the next field and select as necessary:
                    field = self._FindField(next_pos)
                    edit_start, edit_end = field._extent
                    if field._selectOnFieldEntry:
                        ## dbg('move to ', next_pos)
                        wx.CallAfter(self._SetInsertionPoint, next_pos)
                        edit_start, edit_end = self._FindFieldExtent(next_pos)
                        ## dbg('queuing select', edit_start, edit_end)
                        wx.CallAfter(self._SetSelection, edit_end, edit_start)
                    else:
                        if field._insertRight:
                            next_pos = field._extent[1]
                        ## dbg('move to ', next_pos)
                        wx.CallAfter(self._SetInsertionPoint, next_pos)
                    ## dbg(indent=0)
                    return False
    ## dbg(indent=0)
def _OnDecimalPoint(self, event):
    """
    Handler for the decimal-point key in numeric masks.

    For float masks, jumps the cursor to the fraction field — clipping
    the integer portion at the cursor when the key was hit left of the
    decimal.  For integer masks, truncates the value at the cursor.
    Refuses to act (beeps) when the current field is invalid and flags
    _stopFieldChangeIfInvalid.

    Fix: the old implementation assigned a `keep_processing` local that
    was never returned, so the handler fell off the end returning None;
    it now returns False explicitly, which is truth-equivalent for
    callers and consistent with the sibling key handlers.
    """
    ## dbg('MaskedEditMixin::_OnDecimalPoint', indent=1)
    field = self._FindField(self._GetInsertionPoint())
    start, end = field._extent
    slice = self._GetValue()[start:end]
    if field._stopFieldChangeIfInvalid and not field.IsValid(slice):
        # refuse to leave an invalid field
        if not wx.Validator_IsSilent():
            wx.Bell()
        return False

    pos = self._adjustPos(self._GetInsertionPoint(), event.GetKeyCode())

    if self._isFloat:       ## handle float value, move to decimal place
        ## dbg('key == Decimal tab; decimal pos:', self._decimalpos)
        value = self._GetValue()
        if pos < self._decimalpos:
            # clip the integer portion at the cursor, then re-normalize:
            clipped_text = value[0:pos] + self._decimalChar + value[self._decimalpos+1:]
            ## dbg('value: "%s"' % self._GetValue(), "clipped_text:'%s'" % clipped_text)
            newstr = self._adjustFloat(clipped_text)
        else:
            newstr = self._adjustFloat(value)
        wx.CallAfter(self._SetValue, newstr)
        fraction = self._fields[1]
        start, end = fraction._extent
        wx.CallAfter(self._SetInsertionPoint, start)
        if fraction._selectOnFieldEntry:
            ## dbg('queuing selection after decimal point to:', (start, end))
            # reversed (end, start) selection leaves cursor at field start
            wx.CallAfter(self._SetSelection, end, start)
        else:
            wx.CallAfter(self._SetSelection, start, start)

    if self._isInt:         ## handle integer value, truncate from current position
        ## dbg('key == Integer decimal event')
        value = self._GetValue()
        clipped_text = value[0:pos]
        ## dbg('value: "%s"' % self._GetValue(), "clipped_text:'%s'" % clipped_text)
        newstr = self._adjustInt(clipped_text)
        ## dbg('newstr: "%s"' % newstr)
        wx.CallAfter(self._SetValue, newstr)
        newpos = len(newstr.rstrip())
        if newstr.find(')') != -1:
            newpos -= 1     # (don't move past right paren)
        wx.CallAfter(self._SetInsertionPoint, newpos)
        wx.CallAfter(self._SetSelection, newpos, newpos)

    ## dbg(indent=0)
    return False    # key fully handled; suppress further processing
def _OnChangeSign(self, event):
    """
    Handler for sign keys ('-', '+', '(', ')', and space on the sign
    position) in signed numeric masks.  Toggles or sets self._isNeg,
    rewrites the sign character(s) in the value ('-' or a matched pair
    of parens depending on self._useParens), queues the updated text and
    repositions the cursor.  Returns False when the key was consumed,
    True when it should continue through normal processing.
    """
    ## dbg('MaskedEditMixin::_OnChangeSign', indent=1)
    key = event.GetKeyCode()
    pos = self._adjustPos(self._GetInsertionPoint(), key)
    value = self._eraseSelection()
    integer = self._fields[0]
    start, end = integer._extent
    sel_start, sel_to = self._GetSelection()

    #### dbg('adjusted pos:', pos)
    if chr(key) in ('-','+','(', ')') or (chr(key) == " " and pos == self._signpos):
        cursign = self._isNeg   # (kept only for the dbg trace below)
        ## dbg('cursign:', cursign)
        if chr(key) in ('-','(', ')'):
            if sel_start <= self._signpos:
                self._isNeg = True
            else:
                self._isNeg = (not self._isNeg) ## flip value
        else:
            self._isNeg = False
        ## dbg('isNeg?', self._isNeg)

        text, self._signpos, self._right_signpos = self._getSignedValue(candidate=value)
        ## dbg('text:"%s"' % text, 'signpos:', self._signpos, 'right_signpos:', self._right_signpos)
        if text is None:
            text = value

        if self._isNeg and self._signpos is not None and self._signpos != -1:
            # write the negative marker(s) into the sign slot(s):
            if self._useParens and self._right_signpos is not None:
                text = text[:self._signpos] + '(' + text[self._signpos+1:self._right_signpos] + ')' + text[self._right_signpos+1:]
            else:
                text = text[:self._signpos] + '-' + text[self._signpos+1:]
        else:
            #### dbg('self._isNeg?', self._isNeg, 'self.IsValid(%s)' % text, self.IsValid(text))
            # blank the sign slot(s):
            if self._useParens:
                text = text[:self._signpos] + ' ' + text[self._signpos+1:self._right_signpos] + ' ' + text[self._right_signpos+1:]
            else:
                text = text[:self._signpos] + ' ' + text[self._signpos+1:]
            ## dbg('clearing self._isNeg')
            self._isNeg = False

        wx.CallAfter(self._SetValue, text)
        wx.CallAfter(self._applyFormatting)
        ## dbg('pos:', pos, 'signpos:', self._signpos)
        if pos == self._signpos or integer.IsEmpty(text[start:end]):
            wx.CallAfter(self._SetInsertionPoint, self._signpos+1)
        else:
            wx.CallAfter(self._SetInsertionPoint, pos)

        keep_processing = False
    else:
        keep_processing = True
    ## dbg(indent=0)
    return keep_processing
def _OnGroupChar(self, event):
    """
    This handler is only registered if the mask is a numeric mask.
    It allows the insertion of ',' or '.' if appropriate.

    The grouping char is inserted only when it is allowed at the cursor
    position and the resulting value stays valid; otherwise the bell is
    rung.  Returns False in every case (the key is fully consumed).
    """
    ## dbg('MaskedEditMixin::_OnGroupChar', indent=1)
    pos = self._adjustPos(self._GetInsertionPoint(), event.GetKeyCode())
    sel_start, sel_to = self._GetSelection()
    groupchar = self._fields[0]._groupChar

    # grouping char not legal here?
    if not self._isCharAllowed(groupchar, pos, checkRegex=True):
        if not wx.Validator_IsSilent():
            wx.Bell()
        ## dbg(indent=0)
        return False

    newstr, newpos = self._insertKey(groupchar, pos, sel_start, sel_to, self._GetValue() )
    ## dbg("str with '%s' inserted:" % groupchar, '"%s"' % newstr)
    # would the insertion make the whole value invalid?
    if self._ctrl_constraints._validRequired and not self.IsValid(newstr):
        if not wx.Validator_IsSilent():
            wx.Bell()
        ## dbg(indent=0)
        return False

    wx.CallAfter(self._SetValue, newstr)
    wx.CallAfter(self._SetInsertionPoint, newpos)
    ## dbg(indent=0)
    return False
def _findNextEntry(self, pos, adjustInsert=True):
    """ Find the insertion point for the next valid entry character position.

    Skips forward over template (literal) characters.  When the search
    starts on a template char or an explicit field boundary — i.e. we
    are actually crossing into another field — the adjustInsert flag is
    honored: if the destination field is an empty right-insert field,
    the returned position is moved to that field's right edge.  Within
    a field the flag is irrelevant and ignored.

    Fix: the original expressed the flag logic as an if/else whose first
    branch was the no-op ``adjustInsert = adjustInsert``; this is now a
    single boolean expression with identical behavior.
    """
    ## dbg('MaskedEditMixin::_findNextEntry', indent=1)
    # The flag only matters when changing fields:
    adjustInsert = adjustInsert and (
        self._isTemplateChar(pos) or pos in self._explicit_field_boundaries)

    while self._isTemplateChar(pos) and pos < self._masklength:
        pos += 1

    # if changing fields, and we've been told to adjust insert point,
    # look at new field; if empty and right-insert field,
    # adjust to right edge:
    if adjustInsert and pos < self._masklength:
        field = self._FindField(pos)
        start, end = field._extent
        if field._insertRight and field.IsEmpty(self._GetValue()[start:end]):
            pos = end
    ## dbg('final pos:', pos, indent=0)
    return pos
def _findNextTemplateChar(self, pos):
    """ Find the position of the next non-editable character in the mask.

    Returns self._masklength when no template character remains at or
    after *pos*.
    """
    # Robustness: test the bound before probing the mask, so a position
    # at/past the end is never passed to _isTemplateChar.  (The original
    # evaluated the predicate first; result is identical for in-range
    # positions.)
    while pos < self._masklength and not self._isTemplateChar(pos):
        pos += 1
    return pos
def _OnAutoCompleteField(self, event):
    """
    Handler for auto-completion keys within a field that has a choice
    list.  Looks up the field's current (stripped) text against its
    choices, substitutes the matched choice into the control, and
    records the match via _OnAutoSelect.  PageUp/up keys search
    backward, everything else forward.  Unshifted arrows afterwards fall
    through to normal arrow handling; shifted down/right becomes a
    forward field change.  Returns whether the key should continue to
    be processed.
    """
    ## dbg('MaskedEditMixin::_OnAutoCompleteField', indent =1)
    pos = self._GetInsertionPoint()
    field = self._FindField(pos)
    edit_start, edit_end, slice = self._FindFieldExtent(pos, getslice=True)

    match_index = None
    keycode = event.GetKeyCode()

    # strip fill chars / whitespace to get the text to match on:
    if field._fillChar != ' ':
        text = slice.replace(field._fillChar, '')
    else:
        text = slice
    text = text.strip()
    keep_processing = True  # (assume True to start)

    ## dbg('field._hasList?', field._hasList)
    if field._hasList:
        ## dbg('choices:', field._choices)
        ## dbg('compareChoices:', field._compareChoices)
        choices, choice_required = field._compareChoices, field._choiceRequired
        if keycode in (wx.WXK_PRIOR, wx.WXK_UP, wx.WXK_NUMPAD_PRIOR, wx.WXK_NUMPAD_UP):
            direction = -1
        else:
            direction = 1
        match_index, partial_match = self._autoComplete(direction, choices, text, compareNoCase=field._compareNoCase, current_index = field._autoCompleteIndex)
        if( match_index is None
            and (keycode in self._autoCompleteKeycodes + [wx.WXK_PRIOR, wx.WXK_NEXT, wx.WXK_NUMPAD_PRIOR, wx.WXK_NUMPAD_NEXT]
                 or (keycode in [wx.WXK_UP, wx.WXK_DOWN, wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_DOWN] and event.ShiftDown() ) ) ):
            # Select the 1st thing from the list:
            match_index = 0

        if( match_index is not None
            and ( keycode in self._autoCompleteKeycodes + [wx.WXK_PRIOR, wx.WXK_NEXT, wx.WXK_NUMPAD_PRIOR, wx.WXK_NUMPAD_NEXT]
                  or (keycode in [wx.WXK_UP, wx.WXK_DOWN, wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_DOWN] and event.ShiftDown())
                  or (keycode in [wx.WXK_DOWN, wx.WXK_NUMPAD_DOWN] and partial_match) ) ):
            # We're allowed to auto-complete:
            ## dbg('match found')
            value = self._GetValue()
            newvalue = value[:edit_start] + field._choices[match_index] + value[edit_end:]
            ## dbg('setting value to "%s"' % newvalue)
            self._SetValue(newvalue)
            self._SetInsertionPoint(min(edit_end, len(newvalue.rstrip())))
            self._OnAutoSelect(field, match_index)
            self._CheckValid()  # recolor as appopriate

    if keycode in (wx.WXK_UP, wx.WXK_DOWN, wx.WXK_LEFT, wx.WXK_RIGHT,
                   wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_DOWN, wx.WXK_NUMPAD_LEFT, wx.WXK_NUMPAD_RIGHT):
        # treat as left right arrow if unshifted, tab/shift tab if shifted.
        if event.ShiftDown():
            if keycode in (wx.WXK_DOWN, wx.WXK_RIGHT, wx.WXK_NUMPAD_DOWN, wx.WXK_NUMPAD_RIGHT):
                # remove "shifting" and treat as (forward) tab:
                event.m_shiftDown = False
                keep_processing = self._OnChangeField(event)
        else:
            keep_processing = self._OnArrow(event)
    # else some other key; keep processing the key

    ## dbg('keep processing?', keep_processing, indent=0)
    return keep_processing
def _OnAutoSelect(self, field, match_index=None):
    """
    Callback fired when the autoselect feature matches an entry while
    the whole control is selected; records the matched choice index on
    the field so subsequent auto-complete cycling resumes from there.
    """
    if match_index is None:
        return
    field._autoCompleteIndex = match_index
def _autoComplete(self, direction, choices, value, compareNoCase, current_index):
    """
    This function gets called in response to Auto-complete events.
    It attempts to find a match to the specified value against the
    list of choices; if exact match, the index of then next
    appropriate value in the list, based on the given direction.
    If not an exact match, it will return the index of the 1st value from
    the choice list for which the partial value can be extended to match.
    If no match found, it will return None.
    The function returns a 2-tuple, with the 2nd element being a boolean
    that indicates if partial match was necessary.

    direction     -- +1 to search forward, -1 to search backward
    choices       -- list of candidate strings
    value         -- current (possibly partial) text to match
    compareNoCase -- lower-case *value* before comparing (choices are
                     presumably pre-lowered by the caller in this mode --
                     TODO confirm against caller)
    current_index -- index of the currently-matched choice, or None
    """
    if value is None:
        # nothing to match against
        return (None, False)
    partial_match = False
    if compareNoCase:
        value = value.lower()
    last_index = len(choices) - 1   # NOTE(review): computed but never used below
    if value in choices:
        # Exact match: cycle to the previous/next choice, wrapping around.
        if current_index is not None and choices[current_index] == value:
            index = current_index
        else:
            index = choices.index(value)
        if direction == -1:
            # going to previous entry (wrap to the end from position 0)
            if index == 0: index = len(choices) - 1
            else: index -= 1
        else:
            # going to next entry (wrap to the start from the last one)
            if index == len(choices) - 1: index = 0
            else: index += 1
        match = index
    else:
        # No exact match: look for a choice the typed prefix extends to.
        partial_match = True
        value = value.strip()
        match = None
        if current_index is None:
            # no starting point; scan all choices, reversed when going backward
            # (NOTE: Python-2 idiom -- range() returns a list here)
            indices = range(len(choices))
            if direction == -1:
                indices.reverse()
        else:
            # start just past the current selection and wrap around
            if direction == 1:
                indices = range(current_index +1, len(choices)) + range(current_index+1)
            else:
                indices = range(current_index-1, -1, -1) + range(len(choices)-1, current_index-1, -1)
        for index in indices:
            choice = choices[index]
            if choice.find(value, 0) == 0:
                # choice starts with the typed prefix
                match = index
                break
            else:
                pass
    # (debug scaffolding retained from the original dbg instrumentation)
    if match is not None:
        pass
    else:
        pass
    return (match, partial_match)
def _AdjustField(self, pos):
    """
    This function gets called by default whenever the cursor leaves a field.
    The pos argument given is the char position before leaving that field.
    By default, floating point, integer and date values are adjusted to be
    legal in this function. Derived classes may override this function
    to modify the value of the control in a different way when changing fields.
    NOTE: these change the value immediately, and restore the cursor to
    the passed location, so that any subsequent code can then move it
    based on the operation being performed.
    """
    newvalue = value = self._GetValue()
    field = self._FindField(pos)
    start, end, slice = self._FindFieldExtent(getslice=True)
    newfield = field._AdjustField(slice)
    # splice the re-adjusted field text back into the full control value
    newvalue = value[:start] + newfield + value[end:]
    if self._isFloat and newvalue != self._template:
        newvalue = self._adjustFloat(newvalue)
    if self._ctrl_constraints._isInt and value != self._template:
        # NOTE(review): adjusts the *original* value, discarding the field
        # splice above -- behavior preserved as-is; verify intent.
        newvalue = self._adjustInt(value)
    if self._isDate and value != self._template:
        newvalue = self._adjustDate(value, fixcentury=True)
        if self._4digityear:
            # if the 2-digit year was expanded in place, keep the cursor on
            # the same logical digit by skipping the 2 inserted characters
            year2dig = self._dateExtent - 2
            if pos == year2dig and value[year2dig] != newvalue[year2dig]:
                pos = pos+2
    if newvalue != value:
        self._SetValue(newvalue)
        self._SetInsertionPoint(pos)
def _adjustKey(self, pos, key):
""" Apply control formatting to the key (e.g. convert to upper etc). """
field = self._FindField(pos)
if field._forceupper and key in range(97,123):
key = ord( chr(key).upper())
if field._forcelower and key in range(65,90):
key = ord( chr(key).lower())
return key
def _adjustPos(self, pos, key):
    """
    Checks the current insertion point position and adjusts it if
    necessary to skip over non-editable characters.

    pos -- candidate insertion position
    key -- keycode being processed (navigation keys are never adjusted)
    Returns the (possibly updated) position; may also move the control's
    insertion point and selection as a side effect.
    """
    sel_start, sel_to = self._GetSelection()
    # If a numeric or decimal mask, and negatives allowed, reserve the
    # first space for sign, and last one if using parens.
    if( self._signOk
        and ((pos == self._signpos and key in (ord('-'), ord('+'), ord(' ')) )
             or (self._useParens and pos == self._masklength -1))):
        return pos
    if key not in self._nav:
        field = self._FindField(pos)
        if field._insertRight:              # if allow right-insert
            start, end = field._extent
            slice = self._GetValue()[start:end].strip()
            field_len = end - start
            if pos == end:                  # if cursor at right edge of field
                # if not filled or supposed to stay in field, keep current position
                if( len(slice) == field_len and field._moveOnFieldFull
                    and (not field._stopFieldChangeIfInvalid or
                         field._stopFieldChangeIfInvalid and field.IsValid(slice))):
                    # move cursor to next field:
                    pos = self._findNextEntry(pos)
                    self._SetInsertionPoint(pos)
                    if pos < sel_to:
                        self._SetSelection(pos, sel_to)     # restore selection
                    else:
                        self._SetSelection(pos, pos)        # remove selection
                else: # leave cursor alone
                    pass
            else:
                # if at start of control, move to right edge
                if (sel_to == sel_start
                    and (self._isTemplateChar(pos) or (pos == start and len(slice)+ 1 < field_len))
                    and pos != end):
                    pos = end                   # move to right edge
                # if selected to beginning and signed, and not changing sign explicitly:
                elif self._signOk and sel_start == 0 and key not in (ord('-'), ord('+'), ord(' ')):
                    # adjust to past reserved sign position:
                    pos = self._fields[0]._extent[0]
                    self._SetInsertionPoint(pos)
                    # but keep original selection, to allow replacement of any sign:
                    self._SetSelection(0, sel_to)
                else:
                    pass    # leave position/selection alone
        # else make sure the user is not trying to type over a template character
        # If they are, move them to the next valid entry position
        elif self._isTemplateChar(pos):
            # NOTE(review): start/end are only bound in the _insertRight
            # branch above; the IsValid() reference below would raise
            # NameError if reached with _stopFieldChangeIfInvalid set --
            # verify against callers.
            if( (not field._moveOnFieldFull
                 and (not self._signOk
                      or (self._signOk and field._index == 0 and pos > 0) ) )
                or (field._stopFieldChangeIfInvalid
                    and not field.IsValid(self._GetValue()[start:end]) ) ):
                # don't move to next field without explicit cursor movement
                pass
            else:
                # find next valid position
                pos = self._findNextEntry(pos)
                self._SetInsertionPoint(pos)
                if pos < sel_to:    # restore selection
                    self._SetSelection(pos, sel_to)
                else:
                    self._SetSelection(pos, pos)
    return pos
def _adjustFloat(self, candidate=None):
    """
    'Fixes' an floating point control. Collapses spaces, right-justifies, etc.

    candidate -- value to adjust; when None the control value is used and
                 the adjusted result is also scheduled (via wx.CallAfter)
                 to be written back to the control.
    Returns the adjusted string.
    """
    lenInt,lenFraction = [len(s) for s in self._mask.split('.')]  ## Get integer, fraction lengths
    if candidate is None: value = self._GetValue()
    else: value = candidate
    intStr, fracStr = value.split(self._decimalChar)
    intStr = self._fields[0]._AdjustField(intStr)
    lenInt = len(intStr)
    fracStr = fracStr + ('0'*(lenFraction-len(fracStr)))  # pad fraction with trailing zeros
    intStr = string.rjust( intStr[-lenInt:], lenInt)      # Python-2 string module
    newvalue = intStr + self._decimalChar + fracStr
    if self._signOk:
        if len(newvalue) < self._masklength:
            # leave room for the reserved sign position
            newvalue = ' ' + newvalue
        signedvalue = self._getSignedValue(newvalue)[0]
        if signedvalue is not None: newvalue = signedvalue
    # Finally, align string with decimal position, left-padding with
    # fillChar:
    newdecpos = newvalue.find(self._decimalChar)
    if newdecpos < self._decimalpos:
        padlen = self._decimalpos - newdecpos
        newvalue = string.join([' ' * padlen] + [newvalue] ,'')
    if self._signOk and self._useParens:
        # keep the closing paren (or its blank placeholder) in the last slot
        if newvalue.find('(') != -1:
            newvalue = newvalue[:-1] + ')'
        else:
            newvalue = newvalue[:-1] + ' '
    if candidate is None:
        wx.CallAfter(self._SetValue, newvalue)
    return newvalue
def _adjustInt(self, candidate=None):
""" 'Fixes' an integer control. Collapses spaces, right or left-justifies."""
## dbg("MaskedEditMixin::_adjustInt", candidate)
lenInt = self._masklength
if candidate is None: value = self._GetValue()
else: value = candidate
intStr = self._fields[0]._AdjustField(value)
intStr = intStr.strip() # drop extra spaces
## dbg('adjusted field: "%s"' % intStr)
if self._isNeg and intStr.find('-') == -1 and intStr.find('(') == -1:
if self._useParens:
intStr = '(' + intStr + ')'
else:
intStr = '-' + intStr
elif self._isNeg and intStr.find('-') != -1 and self._useParens:
intStr = intStr.replace('-', '(')
if( self._signOk and ((self._useParens and intStr.find('(') == -1)
or (not self._useParens and intStr.find('-') == -1))):
intStr = ' ' + intStr
if self._useParens:
intStr += ' ' # space for right paren position
elif self._signOk and self._useParens and intStr.find('(') != -1 and intStr.find(')') == -1:
# ensure closing right paren:
intStr += ')'
if self._fields[0]._alignRight: ## Only if right-alignment is enabled
intStr = intStr.rjust( lenInt )
else:
intStr = intStr.ljust( lenInt )
if candidate is None:
wx.CallAfter(self._SetValue, intStr )
return intStr
def _adjustDate(self, candidate=None, fixcentury=False, force4digit_year=False):
    """
    'Fixes' a date control, expanding the year if it can.
    Applies various self-formatting options.

    candidate        -- value to adjust (control value when None)
    fixcentury       -- expand a 2-digit year using the century heuristic
    force4digit_year -- always produce a 4-digit year in the result
    Returns the adjusted date text.
    """
    if candidate is None: text = self._GetValue()
    else: text = candidate
    # locate the year sub-field within the date layout
    if self._datestyle == "YMD":
        year_field = 0
    else:
        year_field = 2
    year    = string.replace( _getYear( text, self._datestyle),self._fields[year_field]._fillChar,"")  # drop extra fillChars
    month   = _getMonth( text, self._datestyle)
    day     = _getDay( text, self._datestyle)
    yearVal = None
    yearstart = self._dateExtent - 4
    if( len(year) < 4
        and (fixcentury
             or force4digit_year
             or (self._GetInsertionPoint() > yearstart+1 and text[yearstart+2] == ' ')
             or (self._GetInsertionPoint() > yearstart+2 and text[yearstart+3] == ' ') ) ):
        ## user entered less than four digits and changing fields or past point where we could
        ## enter another digit:
        try:
            yearVal = int(year)
        except:
            # year text is not numeric; fall back to the raw slice
            year = text[yearstart:self._dateExtent]
    if len(year) < 4 and yearVal:
        if len(year) == 2:
            # Fix year adjustment to be less "20th century" :-) and to adjust heuristic as the
            # years pass...
            now = wx.DateTime_Now()
            century = (now.GetYear() /100) * 100        # "this century" (Python-2 integer division)
            twodig_year = now.GetYear() - century       # "this year" (2 digits)
            # if separation between today's 2-digit year and typed value > 50,
            # assume last century,
            # else assume this century.
            #
            # Eg: if 2003 and yearVal == 30, => 2030
            #     if 2055 and yearVal == 80, => 2080
            #     if 2010 and yearVal == 96, => 1996
            #
            if abs(yearVal - twodig_year) > 50:
                yearVal = (century - 100) + yearVal
            else:
                yearVal = century + yearVal
            year = str( yearVal )
        else:   # pad with 0's to make a 4-digit year
            year = "%04d" % yearVal
        if self._4digityear or force4digit_year:
            text = _makeDate(year, month, day, self._datestyle, text) + text[self._dateExtent:]
    return text
def _goEnd(self, getPosOnly=False):
    """ Moves the insertion point to the end of user-entry.

    If getPosOnly is True, the computed position is returned instead of
    being applied to the control.
    """
    text = self._GetValue()
    i = 0
    if len(text.rstrip()):
        # scan backward for the last non-blank character in a mask position
        for i in range( min( self._masklength-1, len(text.rstrip())), -1, -1):
            if self._isMaskChar(i):
                char = text[i]
                if char != ' ':
                    i += 1      # place the cursor just after it
                    break
    if i == 0:
        # nothing entered; fall back to the home position
        pos = self._goHome(getPosOnly=True)
    else:
        pos = min(i,self._masklength)
    field = self._FindField(pos)
    start, end = field._extent
    if field._insertRight and pos < end:
        # right-insert fields always edit at their right edge
        pos = end
    if getPosOnly:
        return pos
    else:
        self._SetInsertionPoint(pos)
def _goHome(self, getPosOnly=False):
""" Moves the insertion point to the beginning of user-entry """
## dbg("MaskedEditMixin::_goHome; getPosOnly:", getPosOnly, indent=1)
text = self._GetValue()
for i in range(self._masklength):
if self._isMaskChar(i):
break
pos = max(i, 0)
## dbg(indent=0)
if getPosOnly:
return pos
else:
self._SetInsertionPoint(max(i,0))
def _getAllowedChars(self, pos):
    """ Returns a string of all allowed user input characters for the provided
        mask character plus control options.
    """
    maskChar = self.maskdict[pos]
    okchars = self.maskchardict[maskChar]    ## entry, get mask approved characters
    # convert okchars to unicode if required; will force subsequent appendings to
    # result in unicode strings
    # (NOTE: Python-2 specific -- str.decode / types.UnicodeType)
    if 'unicode' in wx.PlatformInfo and type(okchars) != types.UnicodeType:
        okchars = okchars.decode(self._defaultEncoding)
    field = self._FindField(pos)
    if okchars and field._okSpaces:          ## Allow spaces?
        okchars += " "
    if okchars and field._includeChars:      ## any additional included characters?
        okchars += field._includeChars
    return okchars
def _isMaskChar(self, pos):
""" Returns True if the char at position pos is a special mask character (e.g. NCXaA#)
"""
if pos < self._masklength:
return self._ismasked[pos]
else:
return False
def _isTemplateChar(self,Pos):
""" Returns True if the char at position pos is a template character (e.g. -not- NCXaA#)
"""
if Pos < self._masklength:
return not self._isMaskChar(Pos)
else:
return False
def _isCharAllowed(self, char, pos, checkRegex=False, allowAutoSelect=True, ignoreInsertRight=False):
    """ Returns True if character is allowed at the specific position, otherwise False.

    char              -- the candidate character
    pos               -- insertion position being tested
    checkRegex        -- also validate the simulated result against any
                        field/control regex filters
    allowAutoSelect   -- passed through to _insertKey when simulating
    ignoreInsertRight -- test pos literally, skipping the right-insert
                        position adjustment
    """
    field = self._FindField(pos)
    right_insert = False
    if self.controlInitialized:
        sel_start, sel_to = self._GetSelection()
    else:
        # no live widget yet; treat as an empty selection at pos
        sel_start, sel_to = pos, pos
    if (field._insertRight or self._ctrl_constraints._insertRight) and not ignoreInsertRight:
        # For right-insert fields the effective test position may shift left.
        start, end = field._extent
        field_len = end - start
        if self.controlInitialized:
            value = self._GetValue()
            fstr = value[start:end].strip()
            if field._padZero:
                # leading zeros are padding, not user input
                while fstr and fstr[0] == '0':
                    fstr = fstr[1:]
            input_len = len(fstr)
            # NOTE(review): operator precedence makes this
            # (signOk and '-' in fstr) or ('(' in fstr); presumably
            # signOk and ('-' in fstr or '(' in fstr) was intended -- verify.
            if self._signOk and '-' in fstr or '(' in fstr:
                input_len -= 1  # sign can move out of field, so don't consider it in length
        else:
            value = self._template
            input_len = 0   # can't get the current "value", so use 0
        # if entire field is selected or position is at end and field is not full,
        # or if allowed to right-insert at any point in field and field is not full and cursor is not at a fillChar
        # or the field is a singleton integer field and is currently 0 and we're at the end:
        if( (sel_start, sel_to) == field._extent
            or (pos == end and ((input_len < field_len)
                                or (field_len == 1
                                    and input_len == field_len
                                    and field._isInt
                                    and value[end-1] == '0'
                                    )
                                ) ) ):
            pos = end - 1
            right_insert = True
        elif( field._allowInsert and sel_start == sel_to
              and (sel_to == end or (sel_to < self._masklength and value[sel_start] != field._fillChar))
              and input_len < field_len ):
            pos = sel_to - 1    # where character will go
            right_insert = True
        # else leave pos alone...
        else:
            pass
    if self._isTemplateChar( pos ):     ## if a template character, return empty
        return False
    if self._isMaskChar( pos ):
        okChars  = self._getAllowedChars(pos)
        # numeric masks additionally accept group/sign characters in the
        # integer portion
        if self._fields[0]._groupdigits and (self._isInt or (self._isFloat and pos < self._decimalpos)):
            okChars += self._fields[0]._groupChar
        if self._signOk:
            if self._isInt or (self._isFloat and pos < self._decimalpos):
                okChars += '-'
                if self._useParens:
                    okChars += '('
            elif self._useParens and (self._isInt or (self._isFloat and pos > self._decimalpos)):
                okChars += ')'
        approved = (self.maskdict[pos] == '*' or char in okChars)
        if approved and checkRegex:
            # simulate the insertion, then check the affected field(s)
            # against their regex filters
            value = self._eraseSelection(self._GetValue())
            if right_insert:
                # move the position to the right side of the insertion:
                at = pos+1
            else:
                at = pos
            if allowAutoSelect:
                newvalue, ignore, ignore, ignore, ignore = self._insertKey(char, at, sel_start, sel_to, value, allowAutoSelect=True)
            else:
                newvalue, ignore = self._insertKey(char, at, sel_start, sel_to, value)
            fields = [self._FindField(pos)] + [self._ctrl_constraints]
            for field in fields:    # includes fields[-1] == "ctrl_constraints"
                if field._regexMask and field._filter:
                    start, end = field._extent
                    slice = newvalue[start:end]
                    approved = (re.match( field._filter, slice) is not None)
                    if not approved: break
        return approved
    else:
        # neither template nor mask position (out of range)
        return False
def _applyFormatting(self):
    """ Apply formatting depending on the control's state.
        Need to find a way to call this whenever the value changes, in case the control's
        value has been changed or set programatically.

    Updates the cached sign state (_signpos/_isNeg) and sets the
    foreground/background colours according to sign, emptiness and
    validity, then refreshes the control.
    """
    # Handle negative numbers
    if self._signOk:
        text, signpos, right_signpos = self._getSignedValue()
        if text and signpos != self._signpos:
            # remember where the sign currently sits
            self._signpos = signpos
        if not text or text[signpos] not in ('-','('):
            self._isNeg = False
        elif text and self._valid and not self._isNeg and text[signpos] in ('-', '('):
            self._isNeg = True
    # negative values get the "signed" foreground colour
    if self._signOk and self._isNeg:
        fc = self._signedForegroundColour
    else:
        fc = self._foregroundColour
    if hasattr(fc, '_name'):
        c = fc._name    # (only used by the removed dbg instrumentation)
    else:
        c = fc
    self.SetForegroundColour(fc)
    # background reflects empty/valid/invalid state
    if self._valid:
        if self.IsEmpty():
            bc = self._emptyBackgroundColour
        else:
            bc = self._validBackgroundColour
    else:
        bc = self._invalidBackgroundColour
    if hasattr(bc, '_name'):
        c = bc._name    # (only used by the removed dbg instrumentation)
    else:
        c = bc
    self.SetBackgroundColour(bc)
    self._Refresh()
def _getAbsValue(self, candidate=None):
    """ Return an unsigned value (i.e. strip the '-' prefix if any), and sign position(s).

    candidate -- value to examine (control value when None)
    Returns (text, signpos, right_signpos): text is the value with any
    sign character blanked out (None when it does not parse as a
    number), signpos the index reserved/used for the leading sign, and
    right_signpos the index of the closing paren (-1 when absent).
    """
    if candidate is None: text = self._GetValue()
    else: text = candidate
    right_signpos = text.find(')')
    if self._isInt:
        if self._ctrl_constraints._alignRight and self._fields[0]._fillChar == ' ':
            signpos = text.find('-')
            if signpos == -1:
                # no '-' found; search for '('
                signpos = text.find('(')
            elif signpos != -1:
                pass
            if signpos == -1:
                # still no explicit sign; pad the text out to mask length
                # so the reserved sign slot exists
                if len(text) < self._masklength:
                    text = ' ' + text
                if len(text) < self._masklength:
                    text += ' '
                if len(text) > self._masklength and text[-1] in (')', ' '):
                    text = text[:-1]
            else:
                # use the blank just left of the number as sign position
                signpos = len(text) - (len(text.lstrip()) + 1)
                if self._useParens and not text.strip():
                    signpos -= 1    # empty value; use penultimate space
            if signpos >= 0:
                # blank out the sign character itself
                text = text[:signpos] + ' ' + text[signpos+1:]
        else:
            if self._signOk:
                # left-aligned / non-space fill: sign lives at position 0
                signpos = 0
                text = self._template[0] + text[1:]
            else:
                signpos = -1
        if right_signpos != -1:
            if self._signOk:
                # blank out the closing paren
                text = text[:right_signpos] + ' ' + text[right_signpos+1:]
            elif len(text) > self._masklength:
                text = text[:right_signpos] + text[right_signpos+1:]
                right_signpos = -1
        elif self._useParens and self._signOk:
            # figure out where it ought to go:
            right_signpos = self._masklength - 1     # initial guess
            if not self._ctrl_constraints._alignRight:
                if len(text.strip()) == 0:
                    right_signpos = signpos + 1
                elif len(text.strip()) < self._masklength:
                    right_signpos = len(text.rstrip())
        groupchar = self._fields[0]._groupChar
        try:
            # NOTE: Python-2 long(); strip group chars and map parens to '-'
            value = long(text.replace(groupchar,'').replace('(','-').replace(')','').replace(' ', ''))
        except:
            # invalid number
            return None, signpos, right_signpos
    else: # float value
        try:
            groupchar = self._fields[0]._groupChar
            value = float(text.replace(groupchar,'').replace(self._decimalChar, '.').replace('(', '-').replace(')','').replace(' ', ''))
        except:
            value = None
        # NOTE(review): relies on Python-2 ordering where None < 0 is
        # legal; the is-not-None guard should logically come first.
        if value < 0 and value is not None:
            signpos = text.find('-')
            if signpos == -1:
                signpos = text.find('(')
            text = text[:signpos] + self._template[signpos] + text[signpos+1:]
        else:
            # look forwards up to the decimal point for the 1st non-digit
            if self._signOk:
                signpos = self._decimalpos - (len(text[:self._decimalpos].lstrip()) + 1)
                # prevent checking for empty string - Tomo - Wed 14 Jan 2004 03:19:09 PM CET
                if len(text) >= signpos+1 and text[signpos+1] in ('-','('):
                    signpos += 1
            else:
                signpos = -1
        if self._useParens:
            if self._signOk:
                # reserve/blank the trailing paren slot
                right_signpos = self._masklength - 1
                text = text[:right_signpos] + ' '
                if text[signpos] == '(':
                    text = text[:signpos] + ' ' + text[signpos+1:]
            else:
                right_signpos = text.find(')')
                if right_signpos != -1:
                    text = text[:-1]
                    right_signpos = -1
        if value is None:
            # invalid number
            text = None
    return text, signpos, right_signpos
def _getSignedValue(self, candidate=None):
    """ Return a signed value by adding a "-" prefix if the value
        is set to negative, or a space if positive.

    candidate -- value to examine (control value when None)
    Returns (text, signpos, right_signpos) as produced by _getAbsValue,
    with the sign character re-inserted when signs are enabled.
    """
    if candidate is None: text = self._GetValue()
    else: text = candidate
    abstext, signpos, right_signpos = self._getAbsValue(text)
    if self._signOk:
        if abstext is None:
            # not a parseable number; pass the failure through
            return abstext, signpos, right_signpos
        # choose the sign character to (re)insert
        if self._isNeg or text[signpos] in ('-', '('):
            if self._useParens:
                sign = '('
            else:
                sign = '-'
        else:
            sign = ' '
        if abstext[signpos] not in string.digits:
            text = abstext[:signpos] + sign + abstext[signpos+1:]
        else:
            # this can happen if value passed is too big; sign assumed to be
            # in position 0, but if already filled with a digit, prepend sign...
            text = sign + abstext
        if self._useParens and text.find('(') != -1:
            # balance the open paren with a closing one
            text = text[:right_signpos] + ')' + text[right_signpos+1:]
    else:
        text = abstext
    return text, signpos, right_signpos
def GetPlainValue(self, candidate=None):
    """ Returns control's value stripped of the template text.
    plainvalue = MaskedEditMixin.GetPlainValue()
    """
    text = self._GetValue() if candidate is None else candidate
    if self.IsEmpty():
        return ""
    # keep only the characters that sit in editable (mask) positions
    kept = []
    for idx in range(min(len(self._template), len(text))):
        if self._mask[idx] in maskchars:
            kept.append(text[idx])
    plain = "".join(kept)
    if self._isFloat or self._isInt:
        # regularize sign representation: parens -> minus
        plain = plain.replace('(', '-').replace(')', ' ')
        if self._signOk and self._isNeg and plain.count('-') == 0:
            # sign lives in the reserved position; add it to the plain value
            plain = '-' + plain.strip()
        if self._fields[0]._alignRight:
            pad = plain.count(',')
            plain = ' ' * pad + plain.replace(',', '')
        else:
            plain = plain.replace(',', '')
    return plain.rstrip()
def IsEmpty(self, value=None):
    """
    Returns True if control is equal to an empty value.
    (Empty means all editable positions in the template == fillChar.)
    """
    if value is None:
        value = self._GetValue()
    if value != self._template:
        # a non-template value is by definition not empty
        return False
    if not self._defaultValue:
        # all mask chars == fillChar by definition
        return True
    # a default value exists: empty only if every editable position still
    # holds a space or its fillChar
    for pos in range(len(self._template)):
        if self._isMaskChar(pos) and value[pos] not in (' ', self._fillChar[pos]):
            return False
    return True
def IsDefault(self, value=None):
    """
    Returns True if the value specified (or the value of the control if not specified)
    is equal to the default value.
    """
    current = self._GetValue() if value is None else value
    return current == self._template
def IsValid(self, value=None):
    """ Indicates whether the value specified (or the current value of the control
    if not specified) is considered valid."""
    checked = value if value is not None else self._GetValue()
    return self._CheckValid(checked)
def _eraseSelection(self, value=None, sel_start=None, sel_to=None):
    """ Used to blank the selection when inserting a new character.

    value            -- text to operate on (control value when None)
    sel_start/sel_to -- selection range (current selection when None)
    Returns the text with selected editable positions reset to their
    template/fill characters, keeping any parentheses balanced.
    """
    if value is None: value = self._GetValue()
    if sel_start is None or sel_to is None:
        sel_start, sel_to = self._GetSelection()                   ## check for a range of selected text
    newvalue = list(value)
    for i in range(sel_start, sel_to):
        if self._signOk and newvalue[i] in ('-', '(', ')'):
            # erasing a sign character: balance parentheses:
            if newvalue[i] == '(':
                right_signpos = value.find(')')
                if right_signpos != -1:
                    newvalue[right_signpos] = ' '
            elif newvalue[i] == ')':
                left_signpos = value.find('(')
                if left_signpos != -1:
                    newvalue[left_signpos] = ' '
            newvalue[i] = ' '
        elif self._isMaskChar(i):
            # reset editable position to its fill ('0' for zero-padded fields)
            field = self._FindField(i)
            if field._padZero:
                newvalue[i] = '0'
            else:
                newvalue[i] = self._template[i]
    value = string.join(newvalue,"")    # Python-2 string module
    return value
def _insertKey(self, char, pos, sel_start, sel_to, value, allowAutoSelect=False):
""" Handles replacement of the character at the current insertion point."""
## dbg('MaskedEditMixin::_insertKey', "\'" + char + "\'", pos, sel_start, sel_to, '"%s"' % value, indent=1)
text = self._eraseSelection(value)
field = self._FindField(pos)
start, end = field._extent
newtext = ""
newpos = pos
# if >= 2 chars selected in a right-insert field, do appropriate erase on field,
# then set selection to end, and do usual right insert.
if sel_start != sel_to and sel_to >= sel_start+2:
field = self._FindField(sel_start)
if( field._insertRight # if right-insert
and field._allowInsert # and allow insert at any point in field
and field == self._FindField(sel_to) ): # and selection all in same field
text = self._OnErase(just_return_value=True) # remove selection before insert
## dbg('text after (left)erase: "%s"' % text)
pos = sel_start = sel_to
if pos != sel_start and sel_start == sel_to:
# adjustpos must have moved the position; make selection match:
sel_start = sel_to = pos
## dbg('field._insertRight?', field._insertRight)
## dbg('field._allowInsert?', field._allowInsert)
## dbg('sel_start, end', sel_start, end)
if sel_start < end:
## dbg('text[sel_start] != field._fillChar?', text[sel_start] != field._fillChar)
pass
if( field._insertRight # field allows right insert
and ((sel_start, sel_to) == field._extent # and whole field selected
or (sel_start == sel_to # or nothing selected
and (sel_start == end # and cursor at right edge
or (field._allowInsert # or field allows right-insert
and sel_start < end # next to other char in field:
and text[sel_start] != field._fillChar) ) ) ) ):
## dbg('insertRight')
fstr = text[start:end]
erasable_chars = [field._fillChar, ' ']
# if zero padding field, or a single digit, and currently a value of 0, allow erasure of 0:
if field._padZero or (field._isInt and (end - start == 1) and fstr[0] == '0'):
erasable_chars.append('0')
erased = ''
#### dbg("fstr[0]:'%s'" % fstr[0])
#### dbg('field_index:', field._index)
#### dbg("fstr[0] in erasable_chars?", fstr[0] in erasable_chars)
#### dbg("self._signOk and field._index == 0 and fstr[0] in ('-','(')?", self._signOk and field._index == 0 and fstr[0] in ('-','('))
if fstr[0] in erasable_chars or (self._signOk and field._index == 0 and fstr[0] in ('-','(')):
erased = fstr[0]
#### dbg('value: "%s"' % text)
#### dbg('fstr: "%s"' % fstr)
#### dbg("erased: '%s'" % erased)
field_sel_start = sel_start - start
field_sel_to = sel_to - start
## dbg('left fstr: "%s"' % fstr[1:field_sel_start])
## dbg('right fstr: "%s"' % fstr[field_sel_to:end])
fstr = fstr[1:field_sel_start] + char + fstr[field_sel_to:end]
if field._alignRight and sel_start != sel_to:
field_len = end - start
## pos += (field_len - len(fstr)) # move cursor right by deleted amount
pos = sel_to
## dbg('setting pos to:', pos)
if field._padZero:
fstr = '0' * (field_len - len(fstr)) + fstr
else:
fstr = fstr.rjust(field_len) # adjust the field accordingly
## dbg('field str: "%s"' % fstr)
newtext = text[:start] + fstr + text[end:]
if erased in ('-', '(') and self._signOk:
newtext = erased + newtext[1:]
## dbg('newtext: "%s"' % newtext)
if self._signOk and field._index == 0:
start -= 1 # account for sign position
#### dbg('field._moveOnFieldFull?', field._moveOnFieldFull)
#### dbg('len(fstr.lstrip()) == end-start?', len(fstr.lstrip()) == end-start)
if( field._moveOnFieldFull and pos == end
and len(fstr.lstrip()) == end-start # if field now full
and (not field._stopFieldChangeIfInvalid # and we either don't care about valid
or (field._stopFieldChangeIfInvalid # or we do and the current field value is valid
and field.IsValid(fstr)))):
newpos = self._findNextEntry(end) # go to next field
else:
newpos = pos # else keep cursor at current position
if not newtext:
## dbg('not newtext')
if newpos != pos:
## dbg('newpos:', newpos)
pass
if self._signOk and self._useParens:
old_right_signpos = text.find(')')
if field._allowInsert and not field._insertRight and sel_to <= end and sel_start >= start:
## dbg('inserting within a left-insert-capable field')
field_len = end - start
before = text[start:sel_start]
after = text[sel_to:end].strip()
#### dbg("current field:'%s'" % text[start:end])
#### dbg("before:'%s'" % before, "after:'%s'" % after)
new_len = len(before) + len(after) + 1 # (for inserted char)
#### dbg('new_len:', new_len)
if new_len < field_len:
retained = after + self._template[end-(field_len-new_len):end]
elif new_len > end-start:
retained = after[1:]
else:
retained = after
left = text[0:start] + before
#### dbg("left:'%s'" % left, "retained:'%s'" % retained)
right = retained + text[end:]
else:
left = text[0:pos]
right = text[pos+1:]
if 'unicode' in wx.PlatformInfo and type(char) != types.UnicodeType:
# convert the keyboard constant to a unicode value, to
# ensure it can be concatenated into the control value:
char = char.decode(self._defaultEncoding)
newtext = left + char + right
#### dbg('left: "%s"' % left)
#### dbg('right: "%s"' % right)
#### dbg('newtext: "%s"' % newtext)
if self._signOk and self._useParens:
# Balance parentheses:
left_signpos = newtext.find('(')
if left_signpos == -1: # erased '('; remove ')'
right_signpos = newtext.find(')')
if right_signpos != -1:
newtext = newtext[:right_signpos] + ' ' + newtext[right_signpos+1:]
elif old_right_signpos != -1:
right_signpos = newtext.find(')')
if right_signpos == -1: # just replaced right-paren
if newtext[pos] == ' ': # we just erased '); erase '('
newtext = newtext[:left_signpos] + ' ' + newtext[left_signpos+1:]
else: # replaced with digit; move ') over
if self._ctrl_constraints._alignRight or self._isFloat:
newtext = newtext[:-1] + ')'
else:
rstripped_text = newtext.rstrip()
right_signpos = len(rstripped_text)
## dbg('old_right_signpos:', old_right_signpos, 'right signpos now:', right_signpos)
newtext = newtext[:right_signpos] + ')' + newtext[right_signpos+1:]
if( field._insertRight # if insert-right field (but we didn't start at right edge)
and field._moveOnFieldFull # and should move cursor when full
and len(newtext[start:end].strip()) == end-start # and field now full
and (not field._stopFieldChangeIfInvalid # and we either don't care about valid
or (field._stopFieldChangeIfInvalid # or we do and the current field value is valid
and field.IsValid(newtext[start:end].strip())))):
newpos = self._findNextEntry(end) # go to next field
## dbg('newpos = nextentry =', newpos)
else:
## dbg('pos:', pos, 'newpos:', pos+1)
newpos = pos+1
if allowAutoSelect:
new_select_to = newpos # (default return values)
match_field = None
match_index = None
if field._autoSelect:
match_index, partial_match = self._autoComplete(1, # (always forward)
field._compareChoices,
newtext[start:end],
compareNoCase=field._compareNoCase,
current_index = field._autoCompleteIndex-1)
if match_index is not None and partial_match:
matched_str = newtext[start:end]
newtext = newtext[:start] + field._choices[match_index] + newtext[end:]
new_select_to = end
match_field = field
if field._insertRight:
# adjust position to just after partial match in field
newpos = end - (len(field._choices[match_index].strip()) - len(matched_str.strip()))
elif self._ctrl_constraints._autoSelect:
match_index, partial_match = self._autoComplete(
1, # (always forward)
self._ctrl_constraints._compareChoices,
newtext,
self._ctrl_constraints._compareNoCase,
current_index = self._ctrl_constraints._autoCompleteIndex - 1)
if match_index is not None and partial_match:
matched_str = newtext
newtext = self._ctrl_constraints._choices[match_index]
edit_end = self._ctrl_constraints._extent[1]
new_select_to = min(edit_end, len(newtext.rstrip()))
match_field = self._ctrl_constraints
if self._ctrl_constraints._insertRight:
# adjust position to just after partial match in control:
newpos = self._masklength - (len(self._ctrl_constraints._choices[match_index].strip()) - len(matched_str.strip()))
## dbg('newtext: "%s"' % newtext, 'newpos:', newpos, 'new_select_to:', new_select_to)
## dbg(indent=0)
return newtext, newpos, new_select_to, match_field, match_index
else:
## dbg('newtext: "%s"' % newtext, 'newpos:', newpos)
## dbg(indent=0)
return newtext, newpos
def _OnFocus(self,event):
"""
This event handler is currently necessary to work around new default
behavior as of wxPython2.3.3;
The TAB key auto selects the entire contents of the wx.TextCtrl *after*
the EVT_SET_FOCUS event occurs; therefore we can't query/adjust the selection
*here*, because it hasn't happened yet. So to prevent this behavior, and
preserve the correct selection when the focus event is not due to tab,
we need to pull the following trick:
"""
## dbg('MaskedEditMixin::_OnFocus')
if self.IsBeingDeleted() or self.GetParent().IsBeingDeleted():
return
wx.CallAfter(self._fixSelection)
event.Skip()
self.Refresh()
def _CheckValid(self, candidate=None):
"""
This is the default validation checking routine; It verifies that the
current value of the control is a "valid value," and has the side
effect of coloring the control appropriately.
"""
## dbg(suspend=1)
## dbg('MaskedEditMixin::_CheckValid: candidate="%s"' % candidate, indent=1)
oldValid = self._valid
if candidate is None: value = self._GetValue()
else: value = candidate
## dbg('value: "%s"' % value)
oldvalue = value
valid = True # assume True
if not self.IsDefault(value) and self._isDate: ## Date type validation
valid = self._validateDate(value)
## dbg("valid date?", valid)
elif not self.IsDefault(value) and self._isTime:
valid = self._validateTime(value)
## dbg("valid time?", valid)
elif not self.IsDefault(value) and (self._isInt or self._isFloat): ## Numeric type
valid = self._validateNumeric(value)
## dbg("valid Number?", valid)
if valid: # and not self.IsDefault(value): ## generic validation accounts for IsDefault()
## valid so far; ensure also allowed by any list or regex provided:
valid = self._validateGeneric(value)
## dbg("valid value?", valid)
## dbg('valid?', valid)
if not candidate:
self._valid = valid
self._applyFormatting()
if self._valid != oldValid:
## dbg('validity changed: oldValid =',oldValid,'newvalid =', self._valid)
## dbg('oldvalue: "%s"' % oldvalue, 'newvalue: "%s"' % self._GetValue())
pass
## dbg(indent=0, suspend=0)
return valid
def _validateGeneric(self, candidate=None):
""" Validate the current value using the provided list or Regex filter (if any).
"""
if candidate is None:
text = self._GetValue()
else:
text = candidate
valid = True # assume True
for i in [-1] + self._field_indices: # process global constraints first:
field = self._fields[i]
start, end = field._extent
slice = text[start:end]
valid = field.IsValid(slice)
if not valid:
break
return valid
    def _validateNumeric(self, candidate=None):
        """ Validate that the value is within the specified range (if specified.)

        Also verifies that the required "anchor" digit position is filled
        and that any grouping characters (thousands separators) are legally
        placed.  Operates on the live control value unless *candidate* is
        supplied.  Returns True if all checks pass.
        """
        if candidate is None: value = self._GetValue()
        else: value = candidate
        try:
            groupchar = self._fields[0]._groupChar
            # Strip grouping chars and map paren-negative notation onto a
            # leading '-' so the string converts cleanly to a number; a
            # ValueError here means the text isn't numeric at all.
            if self._isFloat:
                number = float(value.replace(groupchar, '').replace(self._decimalChar, '.').replace('(', '-').replace(')', ''))
            else:
                number = long( value.replace(groupchar, '').replace('(', '-').replace(')', ''))
            if value.strip():
                # Non-blank values must have a digit at the "anchor"
                # position: rightmost cell if right-aligned, else leftmost.
                if self._fields[0]._alignRight:
                    require_digit_at = self._fields[0]._extent[1]-1
                else:
                    require_digit_at = self._fields[0]._extent[0]
                if value[require_digit_at] not in list(string.digits):
                    valid = False
                    return valid
            # else...
            if self._ctrl_constraints._hasRange:
                valid = self._ctrl_constraints._rangeLow <= number <= self._ctrl_constraints._rangeHigh
            else:
                valid = True
            # Check legal placement of grouping characters, if present:
            groupcharpos = value.rfind(groupchar)
            if groupcharpos != -1:  # group char present
                if self._isFloat and groupcharpos > self._decimalpos:
                    # 1st one found on right-hand side is past decimal point
                    return False
                elif self._isFloat:
                    integer = value[:self._decimalpos].strip()
                else:
                    integer = value.strip()
                # Drop sign decorations before splitting into groups:
                if integer[0] in ('-', '('):
                    integer = integer[1:]
                if integer[-1] == ')':
                    integer = integer[:-1]
                parts = integer.split(groupchar)
                # First group may be 1-3 digits; every subsequent group
                # must be exactly 3 digits with no embedded blanks.
                for i in range(len(parts)):
                    if i == 0 and abs(int(parts[0])) > 999:
                        valid = False
                        break
                    elif i > 0 and (len(parts[i]) != 3 or ' ' in parts[i]):
                        valid = False
                        break
        except ValueError:
            # value not convertible to a number
            valid = False
        return valid
def _validateDate(self, candidate=None):
""" Validate the current date value using the provided Regex filter.
Generally used for character types.BufferType
"""
## dbg('MaskedEditMixin::_validateDate', indent=1)
if candidate is None: value = self._GetValue()
else: value = candidate
## dbg('value = "%s"' % value)
text = self._adjustDate(value, force4digit_year=True) ## Fix the date up before validating it
## dbg('text =', text)
valid = True # assume True until proven otherwise
try:
# replace fillChar in each field with space:
datestr = text[0:self._dateExtent]
for i in range(3):
field = self._fields[i]
start, end = field._extent
fstr = datestr[start:end]
fstr.replace(field._fillChar, ' ')
datestr = datestr[:start] + fstr + datestr[end:]
year, month, day = _getDateParts( datestr, self._datestyle)
year = int(year)
## dbg('self._dateExtent:', self._dateExtent)
if self._dateExtent == 11:
month = charmonths_dict[month.lower()]
else:
month = int(month)
day = int(day)
## dbg('year, month, day:', year, month, day)
except ValueError:
## dbg('cannot convert string to integer parts')
valid = False
except KeyError:
## dbg('cannot convert string to integer month')
valid = False
if valid:
# use wxDateTime to unambiguously try to parse the date:
# ### Note: because wxDateTime is *brain-dead* and expects months 0-11,
# rather than 1-12, so handle accordingly:
if month > 12:
valid = False
else:
month -= 1
try:
## dbg("trying to create date from values day=%d, month=%d, year=%d" % (day,month,year))
dateHandler = wx.DateTimeFromDMY(day,month,year)
## dbg("succeeded")
dateOk = True
except:
## dbg('cannot convert string to valid date')
dateOk = False
if not dateOk:
valid = False
if valid:
# wxDateTime doesn't take kindly to leading/trailing spaces when parsing,
# so we eliminate them here:
timeStr = text[self._dateExtent+1:].strip() ## time portion of the string
if timeStr:
## dbg('timeStr: "%s"' % timeStr)
try:
checkTime = dateHandler.ParseTime(timeStr)
valid = checkTime == len(timeStr)
except:
valid = False
if not valid:
## dbg('cannot convert string to valid time')
pass
## if valid: dbg('valid date')
## dbg(indent=0)
return valid
def _validateTime(self, candidate=None):
""" Validate the current time value using the provided Regex filter.
Generally used for character types.BufferType
"""
## dbg('MaskedEditMixin::_validateTime', indent=1)
# wxDateTime doesn't take kindly to leading/trailing spaces when parsing,
# so we eliminate them here:
if candidate is None: value = self._GetValue().strip()
else: value = candidate.strip()
## dbg('value = "%s"' % value)
valid = True # assume True until proven otherwise
dateHandler = wx.DateTime_Today()
try:
checkTime = dateHandler.ParseTime(value)
## dbg('checkTime:', checkTime, 'len(value)', len(value))
valid = checkTime == len(value)
except:
valid = False
if not valid:
## dbg('cannot convert string to valid time')
pass
## if valid: dbg('valid time')
## dbg(indent=0)
return valid
def _OnKillFocus(self,event):
""" Handler for EVT_KILL_FOCUS event.
"""
## dbg('MaskedEditMixin::_OnKillFocus', 'isDate=',self._isDate, indent=1)
if self.IsBeingDeleted() or self.GetParent().IsBeingDeleted():
return
if self._mask and self._IsEditable():
self._AdjustField(self._GetInsertionPoint())
self._CheckValid() ## Call valid handler
self._LostFocus() ## Provided for subclass use
event.Skip()
## dbg(indent=0)
    def _fixSelection(self):
        """
        This gets called after the TAB traversal selection is made, if the
        focus event was due to this, but before the EVT_LEFT_* events if
        the focus shift was due to a mouse event.

        The trouble is that, a priori, there's no explicit notification of
        why the focus event was received.  However, the whole reason we need to
        do this is because the default behavior on TAB traversal in a wx.TextCtrl is
        now to select the entire contents of the window, something we don't want.
        So we can *now* test the selection range, and if it's "the whole text"
        we can assume the cause, change the insertion point to the start of
        the control, and deselect.
        """
        # can get here if called with wx.CallAfter after underlying
        # control has been destroyed on close, but after focus
        # events
        if not self or not self._mask or not self._IsEditable():
            return

        sel_start, sel_to = self._GetSelection()
        if( sel_start == 0 and sel_to >= len( self._mask )   #(can be greater in numeric controls because of reserved space)
            and (not self._ctrl_constraints._autoSelect or self.IsEmpty() or self.IsDefault() ) ):
            # This isn't normally allowed, and so assume we got here by the new
            # "tab traversal" behavior, so we need to reset the selection
            # and insertion point:
            self._goHome()
            field = self._FindField(self._GetInsertionPoint())
            edit_start, edit_end = field._extent
            if field._selectOnFieldEntry:
                # NOTE(review): 'and' binds tighter than 'or', so this reads as
                # isFloat or (isInt and first-field) — confirm that grouping is
                # intended rather than (isFloat or isInt) and first-field.
                if self._isFloat or self._isInt and field == self._fields[0]:
                    edit_start = 0
                self._SetInsertionPoint(edit_start)
                self._SetSelection(edit_start, edit_end)

            elif field._insertRight:
                # right-insert field: park the caret at the field's end
                self._SetInsertionPoint(edit_end)
                self._SetSelection(edit_end, edit_end)

        elif (self._isFloat or self._isInt):
            # numeric control: place caret/selection sensibly when empty
            text, signpos, right_signpos = self._getAbsValue()
            if text is None or text == self._template:
                integer = self._fields[0]
                edit_start, edit_end = integer._extent

                if integer._selectOnFieldEntry:
                    # select the whole integer portion on entry
                    self._SetInsertionPoint(0)
                    self._SetSelection(0, edit_end)
                elif integer._insertRight:
                    # caret at end of the integer field
                    self._SetInsertionPoint(edit_end)
                    self._SetSelection(edit_end, edit_end)
                else:
                    # numeric ctrl is empty; start at beginning after sign
                    self._SetInsertionPoint(signpos+1)  ## Move past minus sign space if signed
                    self._SetSelection(signpos+1, signpos+1)

        elif sel_start > self._goEnd(getPosOnly=True):
            # cursor beyond the end of the user input; go to end of it
            self._goEnd()
        else:
            # selection already sane; nothing to fix
            pass
def _Keypress(self,key):
""" Method provided to override OnChar routine. Return False to force
a skip of the 'normal' OnChar process. Called before class OnChar.
"""
return True
def _LostFocus(self):
""" Method provided for subclasses. _LostFocus() is called after
the class processes its EVT_KILL_FOCUS event code.
"""
pass
def _OnDoubleClick(self, event):
""" selects field under cursor on dclick."""
pos = self._GetInsertionPoint()
field = self._FindField(pos)
start, end = field._extent
self._SetInsertionPoint(start)
self._SetSelection(start, end)
def _Change(self):
""" Method provided for subclasses. Called by internal EVT_TEXT
handler. Return False to override the class handler, True otherwise.
"""
return True
def _Cut(self):
"""
Used to override the default Cut() method in base controls, instead
copying the selection to the clipboard and then blanking the selection,
leaving only the mask in the selected area behind.
Note: _Cut (read "undercut" ;-) must be called from a Cut() override in the
derived control because the mixin functions can't override a method of
a sibling class.
"""
## dbg("MaskedEditMixin::_Cut", indent=1)
value = self._GetValue()
## dbg('current value: "%s"' % value)
sel_start, sel_to = self._GetSelection() ## check for a range of selected text
## dbg('selected text: "%s"' % value[sel_start:sel_to].strip())
do = wx.TextDataObject()
do.SetText(value[sel_start:sel_to].strip())
wx.TheClipboard.Open()
wx.TheClipboard.SetData(do)
wx.TheClipboard.Close()
if sel_to - sel_start != 0:
self._OnErase()
## dbg(indent=0)
# WS Note: overriding Copy is no longer necessary given that you
# can no longer select beyond the last non-empty char in the control.
#
## def _Copy( self ):
## """
## Override the wx.TextCtrl's .Copy function, with our own
## that does validation. Need to strip trailing spaces.
## """
## sel_start, sel_to = self._GetSelection()
## select_len = sel_to - sel_start
## textval = wx.TextCtrl._GetValue(self)
##
## do = wx.TextDataObject()
## do.SetText(textval[sel_start:sel_to].strip())
## wx.TheClipboard.Open()
## wx.TheClipboard.SetData(do)
## wx.TheClipboard.Close()
def _getClipboardContents( self ):
""" Subroutine for getting the current contents of the clipboard.
"""
do = wx.TextDataObject()
wx.TheClipboard.Open()
success = wx.TheClipboard.GetData(do)
wx.TheClipboard.Close()
if not success:
return None
else:
# Remove leading and trailing spaces before evaluating contents
return do.GetText().strip()
    def _validatePaste(self, paste_text, sel_start, sel_to, raise_on_invalid=False):
        """
        Used by paste routine and field choice validation to see
        if a given slice of paste text is legal for the area in question:
        returns validity, replacement text, and extent of paste in
        template.

        With raise_on_invalid=True, a ValueError (carrying the offending
        text in its .value attribute) is raised instead of returning an
        invalid result.
        """
        select_length = sel_to - sel_start
        maxlength = select_length
        if maxlength == 0:
            # no selection: paste may run to the end of the control
            maxlength = self._masklength - sel_start
            item = 'control'
        else:
            item = 'selection'
        if 'unicode' in wx.PlatformInfo and type(paste_text) != types.UnicodeType:
            paste_text = paste_text.decode(self._defaultEncoding)

        length_considered = len(paste_text)
        if length_considered > maxlength:
            # paste text will not fit into the control/selection
            if raise_on_invalid:
                if item == 'control':
                    ve = ValueError('"%s" will not fit into the control "%s"' % (paste_text, self.name))
                    ve.value = paste_text
                    raise ve
                else:
                    ve = ValueError('"%s" will not fit into the selection' % paste_text)
                    ve.value = paste_text
                    raise ve
            else:
                return False, None, None

        text = self._template
        valid_paste = True
        replacement_text = ""
        replace_to = sel_start
        i = 0
        # Walk the paste text char by char, matching each char against
        # either an editable position or the template, and skipping over
        # runs of template (literal) characters:
        while valid_paste and i < length_considered and replace_to < self._masklength:
            # NOTE(review): this compares against template[replace_to:length_considered];
            # when i > 0 and replace_to != i the two slices have different
            # lengths — confirm the intended short-circuit condition.
            if paste_text[i:] == self._template[replace_to:length_considered]:
                # remainder of paste matches template; skip char-by-char analysis
                replacement_text += paste_text[i:]
                replace_to = i = length_considered
                continue
            # else:
            char = paste_text[i]
            field = self._FindField(replace_to)
            if not field._compareNoCase:
                # honor the field's case-folding rules before testing:
                if field._forceupper:   char = char.upper()
                elif field._forcelower: char = char.lower()

            if not self._isTemplateChar(replace_to) and self._isCharAllowed( char, replace_to, allowAutoSelect=False, ignoreInsertRight=True):
                # editable position accepting this char
                replacement_text += char
                i += 1
                replace_to += 1
            elif( char == self._template[replace_to]
                  or (self._signOk and
                        ( (i == 0 and (char == '-' or (self._useParens and char == '(')))
                          or (i == self._masklength - 1 and self._useParens and char == ')') ) ) ):
                # char matches the template literal, or is a legal sign char
                replacement_text += char
                i += 1
                replace_to += 1
            else:
                # char doesn't fit here; try skipping ahead to the next
                # editable position, copying template literals along the way:
                next_entry = self._findNextEntry(replace_to, adjustInsert=False)
                if next_entry == replace_to:
                    valid_paste = False
                else:
                    replacement_text += self._template[replace_to:next_entry]
                    replace_to = next_entry     # so next_entry will be considered on next loop

        if not valid_paste and raise_on_invalid:
            ve = ValueError('"%s" cannot be inserted into the control "%s"' % (paste_text, self.name))
            ve.value = paste_text
            raise ve
        elif i < len(paste_text):
            # ran out of room before consuming the whole paste text
            valid_paste = False
            if raise_on_invalid:
                ve = ValueError('"%s" will not fit into the control "%s"' % (paste_text, self.name))
                ve.value = paste_text
                raise ve
        if valid_paste:
            # (debug-trace hook; no action in production)
            pass
        return valid_paste, replacement_text, replace_to
    def _Paste( self, value=None, raise_on_invalid=False, just_return_value=False ):
        """
        Used to override the base control's .Paste() function,
        with our own that does validation.

        With value=None the clipboard is consulted; otherwise *value* is
        pasted.  With just_return_value=True the control is not modified;
        the (new_text, replace_to) pair is returned instead.  On an
        invalid paste returns (None, -1) (or raises ValueError when
        raise_on_invalid or the field requests it).

        Note: _Paste must be called from a Paste() override in the
        derived control because the mixin functions can't override a
        method of a sibling class.
        """
        if value is None:
            paste_text = self._getClipboardContents()
        else:
            paste_text = value

        if paste_text is not None:
            if 'unicode' in wx.PlatformInfo and type(paste_text) != types.UnicodeType:
                paste_text = paste_text.decode(self._defaultEncoding)
            # (conversion will raise ValueError if paste isn't legal)
            sel_start, sel_to = self._GetSelection()

            # special case: handle allowInsert fields properly
            field = self._FindField(sel_start)
            edit_start, edit_end = field._extent
            new_pos = None
            if field._allowInsert and sel_to <= edit_end and (sel_start + len(paste_text) < edit_end or field._insertRight):
                if field._insertRight:
                    # want to paste to the left; see if it will fit:
                    left_text = self._GetValue()[edit_start:sel_start].lstrip()
                    if sel_start - (len(left_text) - (sel_to - sel_start) + len(paste_text)) >= edit_start:
                        # will fit! create effective paste text, and move cursor back to do so:
                        paste_text = left_text + paste_text
                        sel_start -= len(left_text)
                        paste_text = paste_text.rjust(sel_to - sel_start)
                    else:
                        # won't fit to the left; paste text remains unchanged
                        pass
                else:
                    # left-insert: retain the tail of the field after the selection
                    paste_text = paste_text + self._GetValue()[sel_to:edit_end].rstrip()
                new_pos = sel_start + len(paste_text)   # store for subsequent positioning

            # Another special case: paste won't fit, but it's a right-insert field where entire
            # non-empty value is selected, and there's room if the selection is expanded leftward:
            if( len(paste_text) > sel_to - sel_start
                and field._insertRight
                and sel_start > edit_start
                and sel_to >= edit_end
                and not self._GetValue()[edit_start:sel_start].strip() ):
                # text won't fit within selection, but left of selection is empty;
                # check to see if we can expand selection to accommodate the value:
                empty_space = sel_start - edit_start
                amount_needed = len(paste_text) - (sel_to - sel_start)
                if amount_needed <= empty_space:
                    sel_start -= amount_needed

            # another special case: deal with signed values properly:
            if self._signOk:
                signedvalue, signpos, right_signpos = self._getSignedValue()
                paste_signpos = paste_text.find('-')
                if paste_signpos == -1:
                    paste_signpos = paste_text.find('(')

                # if paste text will result in signed value:
                if paste_signpos != -1 and (sel_start <= signpos
                                            or (field._insertRight and sel_start - len(paste_text) <= signpos)):
                    signed = True
                else:
                    signed = False
                # remove "sign" from paste text, so we can auto-adjust for sign type after paste:
                paste_text = paste_text.replace('-', ' ').replace('(',' ').replace(')','')
            else:
                signed = False

            # another special case: deal with insert-right fields when selection is empty and
            # cursor is at end of field:
            if field._insertRight and sel_start == edit_end and sel_start == sel_to:
                sel_start -= len(paste_text)
                if sel_start < 0:
                    sel_start = 0

            raise_on_invalid = raise_on_invalid or field._raiseOnInvalidPaste
            try:
                valid_paste, replacement_text, replace_to = self._validatePaste(paste_text, sel_start, sel_to, raise_on_invalid)
            except:
                # re-raise ValueError from _validatePaste (raise_on_invalid)
                raise

            if not valid_paste:
                # paste text not legal for the selection or portion of the
                # control following the cursor; signal the user audibly:
                if not wx.Validator_IsSilent():
                    wx.Bell()
                return None, -1
            # else...
            text = self._eraseSelection()

            new_text = text[:sel_start] + replacement_text + text[replace_to:]
            if new_text:
                new_text = string.ljust(new_text,self._masklength)

            if signed:
                # re-apply the sign notation stripped off earlier:
                new_text, signpos, right_signpos = self._getSignedValue(candidate=new_text)
                if new_text:
                    if self._useParens:
                        new_text = new_text[:signpos] + '(' + new_text[signpos+1:right_signpos] + ')' + new_text[right_signpos+1:]
                    else:
                        new_text = new_text[:signpos] + '-' + new_text[signpos+1:]
                    if not self._isNeg:
                        self._isNeg = 1

            if not just_return_value:
                if new_text != self._GetValue():
                    self.modified = True
                if new_text == '':
                    self.ClearValue()
                else:
                    # defer the value/caret updates so they happen after
                    # the native paste processing completes:
                    wx.CallAfter(self._SetValue, new_text)
                    if new_pos is None:
                        new_pos = sel_start + len(replacement_text)
                    wx.CallAfter(self._SetInsertionPoint, new_pos)
            else:
                return new_text, replace_to
        elif just_return_value:
            # NOTE(review): sel_to is only bound in the branch above; reaching
            # here with an empty clipboard would raise NameError — confirm.
            return self._GetValue(), sel_to
    def _Undo(self, value=None, prev=None, just_return_results=False):
        """ Provides an Undo() method in base controls.

        Restores *prev* (default: the recorded previous value) and tries
        to reconstruct a sensible selection highlighting what changed,
        using difflib to locate the differences between the current and
        previous values.  With just_return_results=True, returns
        (prev, (sel_start, sel_to)) without touching the control.
        """
        if value is None:
            value = self._GetValue()
        if prev is None:
            prev = self._prevValue
        if prev is None:
            # nothing recorded to undo to
            return
        elif value != prev:
            # Determine what to select: (relies on fixed-length strings)
            # (This is a lot harder than it would first appear, because
            # of mask chars that stay fixed, and so break up the "diff"...)

            # Determine where they start to differ:
            i = 0
            length = len(value)     # (both are same length in masked control)

            while( value[:i] == prev[:i] ):
                i += 1
            sel_start = i - 1

            # handle signed values carefully, so undo from signed to unsigned or vice-versa
            # works properly:
            if self._signOk:
                text, signpos, right_signpos = self._getSignedValue(candidate=prev)
                if self._useParens:
                    if prev[signpos] == '(' and prev[right_signpos] == ')':
                        self._isNeg = True
                    else:
                        self._isNeg = False
                    # eliminate source of "far-end" undo difference if using balanced parens:
                    value = value.replace(')', ' ')
                    prev = prev.replace(')', ' ')
                elif prev[signpos] == '-':
                    self._isNeg = True
                else:
                    self._isNeg = False

            # Determine where they stop differing in "undo" result:
            sm = difflib.SequenceMatcher(None, a=value, b=prev)
            i, j, k = sm.find_longest_match(sel_start, length, sel_start, length)

            if k == 0:                              # no match found; select to end
                sel_to = length
            else:
                code_5tuples = sm.get_opcodes()
                for op, i1, i2, j1, j2 in code_5tuples:
                    # (debug-trace loop; no action in production)
                    pass

                diff_found = False
                # look backward through operations needed to produce "previous" value;
                # first change wins:
                for next_op in range(len(code_5tuples)-1, -1, -1):
                    op, i1, i2, j1, j2 = code_5tuples[next_op]
                    field = self._FindField(i2)
                    if op == 'insert' and prev[j1:j2] != self._template[j1:j2]:
                        # a real (non-template) insertion; select it
                        sel_start = j1
                        sel_to = j2
                        diff_found = True
                        break
                    elif op == 'delete' and value[i1:i2] != self._template[i1:i2]:
                        edit_start, edit_end = field._extent
                        if field._insertRight and (field._allowInsert or i2 == edit_end):
                            sel_start = i2
                            sel_to = i2
                        else:
                            sel_start = i1
                            sel_to = j1
                        diff_found = True
                        break
                    elif op == 'replace':
                        if not prev[i1:i2].strip() and field._insertRight:
                            # replaced with blanks in an insert-right field:
                            # just park the caret after the change
                            sel_start = sel_to = j2
                        else:
                            sel_start = j1
                            sel_to = j2
                        diff_found = True
                        break

                if diff_found:
                    # now go forwards, looking for earlier changes:
                    for next_op in range(len(code_5tuples)):
                        op, i1, i2, j1, j2 = code_5tuples[next_op]
                        field = self._FindField(i1)
                        if op == 'equal':
                            continue
                        elif op == 'replace':
                            if field._insertRight:
                                # if replace with spaces in an insert-right control, ignore "forward" replace
                                if not prev[i1:i2].strip():
                                    continue
                                elif j1 < i1:
                                    sel_start = j1
                                else:
                                    sel_start = i1
                            else:
                                sel_start = i1
                            break
                        elif op == 'insert' and not value[i1:i2]:
                            if prev[j1:j2].strip():
                                # item to insert is non-empty; start there
                                sel_start = j1
                                break
                            elif not field._insertRight:
                                # inserted space in a left-insert field
                                sel_start = j1
                                break
                        elif op == 'delete':
                            if field._insertRight:
                                if value[i1:i2].lstrip():
                                    sel_start = j1
                                    break
                                else:
                                    continue
                            else:
                                break
                        else:
                            # we've got what we need
                            break

                if not diff_found:
                    # do "left-insert"-centric processing of difference based on l.c.s.:
                    if i == j and j != sel_start:           # match starts after start of selection
                        sel_to = sel_start + (j-sel_start)  # select to start of match
                    else:
                        sel_to = j                          # (change ends at j)

                # There are several situations where the calculated difference is
                # not what we want to select.  If changing sign, or just adding
                # group characters, we really don't want to highlight the characters
                # changed, but instead leave the cursor where it is.
                # Also, there are situations in which the difference can be ambiguous;
                # Consider:
                #
                # current value:    11234
                # previous value:   1111234
                #
                # Where did the cursor actually lie and which 1s were selected on the delete
                # operation?
                #
                # Also, difflib can "get it wrong;" Consider:
                #
                # current value:    "       128.66"
                # previous value:   "       121.86"
                #
                # difflib produces the following opcodes, which are sub-optimal:
                #    equal value[0:9] (       12) prev[0:9] (       12)
                #   insert value[9:9] () prev[9:11] (1.)
                #    equal value[9:10] (8) prev[11:12] (8)
                #   delete value[10:11] (.) prev[12:12] ()
                #    equal value[11:12] (6) prev[12:13] (6)
                #   delete value[12:13] (6) prev[13:13] ()
                #
                # This should have been:
                #    equal value[0:9] (       12) prev[0:9] (       12)
                #  replace value[9:11] (8.6) prev[9:11] (1.8)
                #    equal value[12:13] (6) prev[12:13] (6)
                #
                # But it didn't figure this out!
                #
                # To get all this right, we use the previous selection recorded to help us...

                if (sel_start, sel_to) != self._prevSelection:
                    prev_sel_start, prev_sel_to = self._prevSelection
                    field = self._FindField(sel_start)

                    if( self._signOk
                        and sel_start < self._masklength
                        and (prev[sel_start] in ('-', '(', ')')
                             or value[sel_start] in ('-', '(', ')')) ):
                        # change of sign; leave cursor alone...
                        sel_start, sel_to = self._prevSelection

                    elif field._groupdigits and (value[sel_start:sel_to] == field._groupChar
                                                 or prev[sel_start:sel_to] == field._groupChar):
                        # do not highlight grouping changes
                        sel_start, sel_to = self._prevSelection

                    else:
                        calc_select_len = sel_to - sel_start
                        prev_select_len = prev_sel_to - prev_sel_start

                        if prev_select_len >= calc_select_len:
                            # old selection was bigger; trust it:
                            if not field._insertRight:
                                sel_start, sel_to = self._prevSelection
                            else:
                                sel_to = self._prevSelection[1]

                        elif( sel_to > prev_sel_to                  # calculated select past last selection
                              and prev_sel_to < len(self._template) # and prev_sel_to not at end of control
                              and sel_to == len(self._template) ):  # and calculated selection goes to end of control

                            i, j, k = sm.find_longest_match(prev_sel_to, length, prev_sel_to, length)
                            if k > 0:
                                # difflib must not have optimized opcodes properly;
                                sel_to = j
                            else:
                                # look for possible ambiguous diff:
                                # if last change resulted in no selection, test from resulting cursor position:
                                if prev_sel_start == prev_sel_to:
                                    calc_select_len = sel_to - sel_start
                                    field = self._FindField(prev_sel_start)

                                    # determine which way to search from last cursor position for ambiguous change:
                                    if field._insertRight:
                                        test_sel_start = prev_sel_start
                                        test_sel_to = prev_sel_start + calc_select_len
                                    else:
                                        test_sel_start = prev_sel_start - calc_select_len
                                        test_sel_to = prev_sel_start
                                else:
                                    test_sel_start, test_sel_to = prev_sel_start, prev_sel_to

                                # if calculated selection spans characters, and same characters
                                # "before" the previous insertion point are present there as well,
                                # select the ones related to the last known selection instead.
                                if( sel_start != sel_to
                                    and test_sel_to < len(self._template)
                                    and prev[test_sel_start:test_sel_to] == prev[sel_start:sel_to] ):
                                    sel_start, sel_to = test_sel_start, test_sel_to

                                # finally, make sure that the old and new values are
                                # different where we say they're different:
                                while( sel_to - 1 > 0
                                       and sel_to > sel_start
                                       and value[sel_to-1:] == prev[sel_to-1:]):
                                    sel_to -= 1
                                while( sel_start + 1 < self._masklength
                                       and sel_start < sel_to
                                       and value[:sel_start+1] == prev[:sel_start+1]):
                                    sel_start += 1

            if just_return_results:
                return prev, (sel_start, sel_to)
            # else...
            self._SetValue(prev)
            self._SetInsertionPoint(sel_start)
            self._SetSelection(sel_start, sel_to)

        else:
            # no difference between current and previous value
            if just_return_results:
                return prev, self._GetSelection()
def _OnClear(self, event):
    """ Provides an action for context menu delete operation """
    # Context-menu "Delete" clears the whole control (back to the empty
    # template), not just the current selection:
    self.ClearValue()
def _OnContextMenu(self, event):
    """
    Build and pop up the right-click edit menu (Undo/Cut/Copy/Paste/
    Delete/Select All), wiring each entry to the corresponding masked-edit
    keyboard handler so menu actions behave exactly like the accelerators.
    """
##        dbg('MaskedEditMixin::OnContextMenu()', indent=1)
    menu = wx.Menu()
    # Menu layout and handler table; a None id marks a separator.
    entries = [
        (wx.ID_UNDO,      "Undo",       self._OnCtrl_Z),
        (None,            None,         None),
        (wx.ID_CUT,       "Cut",        self._OnCtrl_X),
        (wx.ID_COPY,      "Copy",       self._OnCtrl_C),
        (wx.ID_PASTE,     "Paste",      self._OnCtrl_V),
        (wx.ID_CLEAR,     "Delete",     self._OnClear),
        (None,            None,         None),
        (wx.ID_SELECTALL, "Select All", self._OnCtrl_A),
    ]
    for item_id, item_label, item_handler in entries:
        if item_id is None:
            menu.AppendSeparator()
        else:
            menu.Append(item_id, item_label, "")
    # Bindings are installed after all items exist, as in the original code.
    for item_id, item_label, item_handler in entries:
        if item_id is not None:
            wx.EVT_MENU(menu, item_id, item_handler)

    ## WSS: The base control apparently handles enable/disable of
    ## wx.ID_CUT, wx.ID_COPY, wx.ID_PASTE and wx.ID_CLEAR menu items
    ## even if the menu is one we created.  However, it doesn't do undo
    ## properly, so we're keeping track of previous values ourselves.
    ## Therefore, we have to override the default update for that item on the menu:
    wx.EVT_UPDATE_UI(self, wx.ID_UNDO, self._UndoUpdateUI)
    self._contextMenu = menu

    self.PopupMenu(menu, event.GetPosition())
    menu.Destroy()
    self._contextMenu = None
##        dbg(indent=0)
def _UndoUpdateUI(self, event):
    """
    UI-update handler for the context menu's Undo item: enabled only when
    a previous value exists and differs from the current one.
    """
    can_undo = self._prevValue is not None and self._prevValue != self._curValue
    self._contextMenu.Enable(wx.ID_UNDO, can_undo)
def _OnCtrlParametersChanged(self):
    """
    Overridable function to allow derived classes to take action as a
    result of parameter changes prior to possibly changing the value
    of the control.
    """
    # Intentionally a no-op in the mixin; subclasses override as needed.
    pass
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
class MaskedEditAccessorsMixin:
    """
    To avoid a ton of boiler-plate, and to automate the getter/setter generation
    for each valid control parameter so we never forget to add the functions when
    adding parameters, this class programmatically adds the masked edit mixin
    parameters to itself.
    (This makes it easier for Designers like Boa to deal with masked controls.)

    To further complicate matters, this is done with an extra level of inheritance,
    so that "general" classes like masked.TextCtrl can have all possible attributes,
    while derived classes, like masked.TimeCtrl and masked.NumCtrl can prevent
    exposure of those optional attributes of their base class that do not make
    sense for their derivation.

    Therefore, we define:
        BaseMaskedTextCtrl(TextCtrl, MaskedEditMixin)
    and
        masked.TextCtrl(BaseMaskedTextCtrl, MaskedEditAccessorsMixin).

    This allows us to then derive:
        masked.NumCtrl( BaseMaskedTextCtrl )
    and not have to expose all the same accessor functions for the
    derived control when they don't all make sense for it.
    """
    # Define the default set of attributes exposed by the most generic masked controls:
    exposed_basectrl_params = MaskedEditMixin.valid_ctrl_params.keys() + Field.valid_params.keys()
    exposed_basectrl_params.remove('index')
    exposed_basectrl_params.remove('extent')
    exposed_basectrl_params.remove('foregroundColour')   # (base class already has this)

    for param in exposed_basectrl_params:
        propname = param[0].upper() + param[1:]
        exec('def Set%s(self, value): self.SetCtrlParameters(%s=value)' % (propname, param))
        exec('def Get%s(self): return self.GetCtrlParameter("%s")' % (propname, param))

        if param.find('Colour') != -1:
            # Add non-british spellings, for backward-compatibility.
            # BUGFIX: str.replace() returns a *new* string (strings are
            # immutable); the original code discarded the result, so the
            # "Color" accessors were never actually generated -- the two
            # exec calls below merely redefined the "Colour" versions.
            propname = propname.replace('Colour', 'Color')
            exec('def Set%s(self, value): self.SetCtrlParameters(%s=value)' % (propname, param))
            exec('def Get%s(self): return self.GetCtrlParameter("%s")' % (propname, param))
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
## these are helper subroutines:
def _movetofloat( origvalue, fmtstring, neg, addseparators=False, sepchar = ',',fillchar=' '):
""" addseparators = add separator character every three numerals if True
"""
fmt0 = fmtstring.split('.')
fmt1 = fmt0[0]
fmt2 = fmt0[1]
val = origvalue.split('.')[0].strip()
ret = fillchar * (len(fmt1)-len(val)) + val + "." + "0" * len(fmt2)
if neg:
ret = '-' + ret[1:]
return (ret,len(fmt1))
def _isDateType( fmtstring ):
""" Checks the mask and returns True if it fits an allowed
date or datetime format.
"""
dateMasks = ("^##/##/####",
"^##-##-####",
"^##.##.####",
"^####/##/##",
"^####-##-##",
"^####.##.##",
"^##/CCC/####",
"^##.CCC.####",
"^##/##/##$",
"^##/##/## ",
"^##/CCC/##$",
"^##.CCC.## ",)
reString = "|".join(dateMasks)
filter = re.compile( reString)
if re.match(filter,fmtstring): return True
return False
def _isTimeType( fmtstring ):
""" Checks the mask and returns True if it fits an allowed
time format.
"""
reTimeMask = "^##:##(:##)?( (AM|PM))?"
filter = re.compile( reTimeMask )
if re.match(filter,fmtstring): return True
return False
def _isFloatingPoint( fmtstring):
filter = re.compile("[ ]?[#]+\.[#]+\n")
if re.match(filter,fmtstring+"\n"): return True
return False
def _isInteger( fmtstring ):
filter = re.compile("[#]+\n")
if re.match(filter,fmtstring+"\n"): return True
return False
def _getDateParts( dateStr, dateFmt ):
if len(dateStr) > 11: clip = dateStr[0:11]
else: clip = dateStr
if clip[-2] not in string.digits:
clip = clip[:-1] # (got part of time; drop it)
dateSep = (('/' in clip) * '/') + (('-' in clip) * '-') + (('.' in clip) * '.')
slices = clip.split(dateSep)
if dateFmt == "MDY":
y,m,d = (slices[2],slices[0],slices[1]) ## year, month, date parts
elif dateFmt == "DMY":
y,m,d = (slices[2],slices[1],slices[0]) ## year, month, date parts
elif dateFmt == "YMD":
y,m,d = (slices[0],slices[1],slices[2]) ## year, month, date parts
else:
y,m,d = None, None, None
if not y:
return None
else:
return y,m,d
def _getDateSepChar(dateStr):
clip = dateStr[0:10]
dateSep = (('/' in clip) * '/') + (('-' in clip) * '-') + (('.' in clip) * '.')
return dateSep
def _makeDate( year, month, day, dateFmt, dateStr):
sep = _getDateSepChar( dateStr)
if dateFmt == "MDY":
return "%s%s%s%s%s" % (month,sep,day,sep,year) ## year, month, date parts
elif dateFmt == "DMY":
return "%s%s%s%s%s" % (day,sep,month,sep,year) ## year, month, date parts
elif dateFmt == "YMD":
return "%s%s%s%s%s" % (year,sep,month,sep,day) ## year, month, date parts
else:
return None
def _getYear(dateStr,dateFmt):
    """Return the year component of *dateStr*, as split by _getDateParts()."""
    return _getDateParts( dateStr, dateFmt)[0]
def _getMonth(dateStr,dateFmt):
    """Return the month component of *dateStr*, as split by _getDateParts()."""
    return _getDateParts( dateStr, dateFmt)[1]
def _getDay(dateStr,dateFmt):
    """Return the day component of *dateStr*, as split by _getDateParts()."""
    return _getDateParts( dateStr, dateFmt)[2]
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
class __test(wx.PySimpleApp):
    """
    Interactive demo application (page #1): a frame full of MaskedTextCtrl
    examples exercising masks, format codes, regexp validation, ranges,
    choice lists and default values.  Private test harness, not public API.
    """
    def OnInit(self):
        """Build the demo frame; note this also runs MainLoop() before returning."""
        from wx.lib.rcsizer import RowColSizer
        self.frame = wx.Frame( None, -1, "MaskedEditMixin 0.0.7 Demo Page #1", size = (700,600))
        self.panel = wx.Panel( self.frame, -1)
        self.sizer = RowColSizer()
        self.labels = []
        self.editList = []
        rowcount = 4

        # Command buttons: Close this demo, and open the autoformat demo page.
        id, id1 = wx.NewId(), wx.NewId()
        self.command1 = wx.Button( self.panel, id, "&Close" )
        self.command2 = wx.Button( self.panel, id1, "&AutoFormats" )
        self.sizer.Add(self.command1, row=0, col=0, flag=wx.ALL, border = 5)
        self.sizer.Add(self.command2, row=0, col=1, colspan=2, flag=wx.ALL, border = 5)
        self.panel.Bind(wx.EVT_BUTTON, self.onClick, self.command1 )
##        self.panel.SetDefaultItem(self.command1 )
        self.panel.Bind(wx.EVT_BUTTON, self.onClickPage, self.command2)

        # Checkboxes that toggle emptyInvalid / empty-highlight on all demo controls.
        self.check1 = wx.CheckBox( self.panel, -1, "Disallow Empty" )
        self.check2 = wx.CheckBox( self.panel, -1, "Highlight Empty" )
        self.sizer.Add( self.check1, row=0,col=3, flag=wx.ALL,border=5 )
        self.sizer.Add( self.check2, row=0,col=4, flag=wx.ALL,border=5 )
        self.panel.Bind(wx.EVT_CHECKBOX, self._onCheck1, self.check1 )
        self.panel.Bind(wx.EVT_CHECKBOX, self._onCheck2, self.check2 )

        label = """Press ctrl-s in any field to output the value and plain value. Press ctrl-x to clear and re-set any field.
Note that all controls have been auto-sized by including F in the format code.
Try entering nonsensical or partial values in validated fields to see what happens (use ctrl-s to test the valid status)."""
        label2 = "\nNote that the State and Last Name fields are list-limited (Name:Smith,Jones,Williams)."

        self.label1 = wx.StaticText( self.panel, -1, label)
        self.label2 = wx.StaticText( self.panel, -1, "Description")
        self.label3 = wx.StaticText( self.panel, -1, "Mask Value")
        self.label4 = wx.StaticText( self.panel, -1, "Format")
        self.label5 = wx.StaticText( self.panel, -1, "Reg Expr Val. (opt)")
        self.label6 = wx.StaticText( self.panel, -1, "MaskedEdit Ctrl")
        self.label7 = wx.StaticText( self.panel, -1, label2)
        self.label7.SetForegroundColour("Blue")
        self.label1.SetForegroundColour("Blue")
        self.label2.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))
        self.label3.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))
        self.label4.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))
        self.label5.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))
        self.label6.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))

        self.sizer.Add( self.label1, row=1,col=0,colspan=7, flag=wx.ALL,border=5)
        self.sizer.Add( self.label7, row=2,col=0,colspan=7, flag=wx.ALL,border=5)
        self.sizer.Add( self.label2, row=3,col=0, flag=wx.ALL,border=5)
        self.sizer.Add( self.label3, row=3,col=1, flag=wx.ALL,border=5)
        self.sizer.Add( self.label4, row=3,col=2, flag=wx.ALL,border=5)
        self.sizer.Add( self.label5, row=3,col=3, flag=wx.ALL,border=5)
        self.sizer.Add( self.label6, row=3,col=4, flag=wx.ALL,border=5)

        # The following list is of the controls for the demo. Feel free to play around with
        # the options!
        # Tuple layout: (description, mask, excludeChars, formatcodes, validRegex,
        #                validRange, choices, defaultValue)
        controls = [
            ("Phone No", "(###) ###-#### x:###", "", 'F!^-R', "^\(\d\d\d\) \d\d\d-\d\d\d\d", (),[],''),
            ("Last Name Only", "C{14}", "", 'F {list}', '^[A-Z][a-zA-Z]+', (),('Smith','Jones','Williams'),''),
            ("Full Name", "C{14}", "", 'F_', '^[A-Z][a-zA-Z]+ [A-Z][a-zA-Z]+', (),[],''),
            ("Social Sec#", "###-##-####", "", 'F', "\d{3}-\d{2}-\d{4}", (),[],''),
            ("U.S. Zip+4", "#{5}-#{4}", "", 'F', "\d{5}-(\s{4}|\d{4})",(),[],''),
            ("U.S. State (2 char)\n(with default)","AA", "", 'F!', "[A-Z]{2}", (),states, 'AZ'),
            ("Customer No", "\CAA-###", "", 'F!', "C[A-Z]{2}-\d{3}", (),[],''),
            ("Date (MDY) + Time\n(with default)", "##/##/#### ##:## AM", 'BCDEFGHIJKLMNOQRSTUVWXYZ','DFR!',"", (),[], r'03/05/2003 12:00 AM'),
            ("Invoice Total", "#{9}.##", "", 'F-R,', "", (),[], ''),
            ("Integer (signed)\n(with default)", "#{6}", "", 'F-R', "", (),[], '0 '),
            ("Integer (unsigned)\n(with default), 1-399", "######", "", 'F', "", (1,399),[], '1 '),
            ("Month selector", "XXX", "", 'F', "", (),
             ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],""),
            ("fraction selector","#/##", "", 'F', "^\d\/\d\d?", (),
             ['2/3', '3/4', '1/2', '1/4', '1/8', '1/16', '1/32', '1/64'], "")
            ]

        for control in controls:
            self.sizer.Add( wx.StaticText( self.panel, -1, control[0]),row=rowcount, col=0,border=5,flag=wx.ALL)
            self.sizer.Add( wx.StaticText( self.panel, -1, control[1]),row=rowcount, col=1,border=5, flag=wx.ALL)
            self.sizer.Add( wx.StaticText( self.panel, -1, control[3]),row=rowcount, col=2,border=5, flag=wx.ALL)
            self.sizer.Add( wx.StaticText( self.panel, -1, control[4][:20]),row=rowcount, col=3,border=5, flag=wx.ALL)

            # NOTE(review): the slice was disabled ("[:]" instead of "[:-2]"),
            # so this test is always True and the MaskedComboBox branch below
            # is currently unreachable.
            if control in controls[:]:#-2]:
                newControl = MaskedTextCtrl( self.panel, -1, "",
                                             mask = control[1],
                                             excludeChars = control[2],
                                             formatcodes = control[3],
                                             includeChars = "",
                                             validRegex = control[4],
                                             validRange = control[5],
                                             choices = control[6],
                                             defaultValue = control[7],
                                             demo = True)
                if control[6]: newControl.SetCtrlParameters(choiceRequired = True)
            else:
                newControl = MaskedComboBox( self.panel, -1, "",
                                             choices = control[7],
                                             choiceRequired = True,
                                             mask = control[1],
                                             formatcodes = control[3],
                                             excludeChars = control[2],
                                             includeChars = "",
                                             validRegex = control[4],
                                             validRange = control[5],
                                             demo = True)
            self.editList.append( newControl )

            self.sizer.Add( newControl, row=rowcount,col=4,flag=wx.ALL,border=5)
            rowcount += 1

        self.sizer.AddGrowableCol(4)
        self.panel.SetSizer(self.sizer)
        self.panel.SetAutoLayout(1)

        self.frame.Show(1)
        # NOTE(review): MainLoop() is invoked inside OnInit, so "return True"
        # only executes once the application exits.
        self.MainLoop()

        return True

    def onClick(self, event):
        # Close button handler.
        self.frame.Close()

    def onClickPage(self, event):
        # Open demo page #2 (autoformats).
        # NOTE(review): '__test2' is subject to Python name mangling inside
        # this class (it is looked up as '_test__test2'), which does not match
        # the module-level '__test2' -- likely NameError when clicked; confirm.
        self.page2 = __test2(self.frame,-1,"")
        self.page2.Show(True)

    def _onCheck1(self,event):
        """ Set required value on/off """
        value = event.IsChecked()
        if value:
            for control in self.editList:
                control.SetCtrlParameters(emptyInvalid=True)
                control.Refresh()
        else:
            for control in self.editList:
                control.SetCtrlParameters(emptyInvalid=False)
                control.Refresh()
        self.panel.Refresh()

    def _onCheck2(self,event):
        """ Highlight empty values"""
        value = event.IsChecked()
        if value:
            for control in self.editList:
                control.SetCtrlParameters( emptyBackgroundColour = 'Aquamarine')
                control.Refresh()
        else:
            for control in self.editList:
                control.SetCtrlParameters( emptyBackgroundColour = 'White')
                control.Refresh()
        self.panel.Refresh()
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
class __test2(wx.Frame):
    """
    Interactive demo frame (page #2): controls constructed purely from
    AutoFormat codes, plus the derived IpAddrCtrl.  Private test harness.
    """
    def __init__(self, parent, id, caption):
        wx.Frame.__init__( self, parent, id, "MaskedEdit control 0.0.7 Demo Page #2 -- AutoFormats", size = (550,600))
        from wx.lib.rcsizer import RowColSizer
        self.panel = wx.Panel( self, -1)
        self.sizer = RowColSizer()
        self.labels = []
        self.texts = []
        rowcount = 4

        label = """\
All these controls have been created by passing a single parameter, the AutoFormat code.
The class contains an internal dictionary of types and formats (autoformats).
To see a great example of validations in action, try entering a bad email address, then tab out."""

        self.label1 = wx.StaticText( self.panel, -1, label)
        self.label2 = wx.StaticText( self.panel, -1, "Description")
        self.label3 = wx.StaticText( self.panel, -1, "AutoFormat Code")
        self.label4 = wx.StaticText( self.panel, -1, "MaskedEdit Control")
        self.label1.SetForegroundColour("Blue")
        self.label2.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))
        self.label3.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))
        self.label4.SetFont(wx.Font(9,wx.SWISS,wx.NORMAL,wx.BOLD))

        self.sizer.Add( self.label1, row=1,col=0,colspan=3, flag=wx.ALL,border=5)
        self.sizer.Add( self.label2, row=3,col=0, flag=wx.ALL,border=5)
        self.sizer.Add( self.label3, row=3,col=1, flag=wx.ALL,border=5)
        self.sizer.Add( self.label4, row=3,col=2, flag=wx.ALL,border=5)

        # Command buttons: Close, and dump all autoformats to stdout.
        id, id1 = wx.NewId(), wx.NewId()
        self.command1 = wx.Button( self.panel, id, "&Close")
        self.command2 = wx.Button( self.panel, id1, "&Print Formats")
        self.panel.Bind(wx.EVT_BUTTON, self.onClick, self.command1)
        self.panel.SetDefaultItem(self.command1)
        self.panel.Bind(wx.EVT_BUTTON, self.onClickPrint, self.command2)

        # The following list is of the controls for the demo. Feel free to play around with
        # the options!
        controls = [
            ("Phone No","USPHONEFULLEXT"),
            ("US Date + Time","USDATETIMEMMDDYYYY/HHMM"),
            ("US Date MMDDYYYY","USDATEMMDDYYYY/"),
            ("Time (with seconds)","TIMEHHMMSS"),
            ("Military Time\n(without seconds)","24HRTIMEHHMM"),
            ("Social Sec#","USSOCIALSEC"),
            ("Credit Card","CREDITCARD"),
            ("Expiration MM/YY","EXPDATEMMYY"),
            ("Percentage","PERCENT"),
            ("Person's Age","AGE"),
            ("US Zip Code","USZIP"),
            ("US Zip+4","USZIPPLUS4"),
            ("Email Address","EMAIL"),
            ("IP Address", "(derived control IpAddrCtrl)")
            ]

        for control in controls:
            self.sizer.Add( wx.StaticText( self.panel, -1, control[0]),row=rowcount, col=0,border=5,flag=wx.ALL)
            self.sizer.Add( wx.StaticText( self.panel, -1, control[1]),row=rowcount, col=1,border=5, flag=wx.ALL)
            # last list entry is the IP address control, built from IpAddrCtrl
            # rather than an autoformat:
            if control in controls[:-1]:
                self.sizer.Add( MaskedTextCtrl( self.panel, -1, "",
                                                autoformat = control[1],
                                                demo = True),
                                row=rowcount,col=2,flag=wx.ALL,border=5)
            else:
                self.sizer.Add( IpAddrCtrl( self.panel, -1, "", demo=True ),
                                row=rowcount,col=2,flag=wx.ALL,border=5)
            rowcount += 1

        self.sizer.Add(self.command1, row=0, col=0, flag=wx.ALL, border = 5)
        self.sizer.Add(self.command2, row=0, col=1, flag=wx.ALL, border = 5)
        self.sizer.AddGrowableCol(3)

        self.panel.SetSizer(self.sizer)
        self.panel.SetAutoLayout(1)

    def onClick(self, event):
        # Close button handler.
        self.Close()

    def onClickPrint(self, event):
        # Dump every registered autoformat's mask and validation regexp to stdout.
        for format in masktags.keys():
            sep = "+------------------------+"
            print "%s\n%s \n Mask: %s \n RE Validation string: %s\n" % (sep,format, masktags[format]['mask'], masktags[format]['validRegex'])
## ---------- ---------- ---------- ---------- ---------- ---------- ----------
if __name__ == "__main__":
    # Launch interactive demo page #1 when run as a script.
    # (False => don't redirect stdout/stderr to a wx output window.)
    app = __test(False)

# NOTE(review): module-level scratch variable; its purpose is not evident
# from this file.
__i=0
##
## Current Issues:
## ===================================
##
## 1. WS: For some reason I don't understand, the control is generating two (2)
## EVT_TEXT events for every one (1) .SetValue() of the underlying control.
## I've been unsuccessful in determining why or in my efforts to make just one
## occur. So, I've added a hack to save the last seen value from the
## control in the EVT_TEXT handler, and if *different*, call event.Skip()
## to propagate it down the event chain, and let the application see it.
##
## 2. WS: MaskedComboBox is deficient in several areas, all having to do with the
## behavior of the underlying control that I can't fix. The problems are:
## a) The background coloring doesn't work in the text field of the control;
## instead, there's a only border around it that assumes the correct color.
## b) The control will not pass WXK_TAB to the event handler, no matter what
## I do, and there's no style wxCB_PROCESS_TAB like wxTE_PROCESS_TAB to
## indicate that we want these events. As a result, MaskedComboBox
## doesn't do the nice field-tabbing that MaskedTextCtrl does.
## c) Auto-complete had to be reimplemented for the control because programmatic
## setting of the value of the text field does not set up the auto complete
## the way that the control processing keystrokes does. (But I think I've
## implemented a fairly decent approximation.) Because of this the control
## also won't auto-complete on dropdown, and there's no event I can catch
## to work around this problem.
## d) There is no method provided for getting the selection; the hack I've
## implemented has its flaws, not the least of which is that due to the
## strategy that I'm using, the paste buffer is always replaced by the
## contents of the control's selection when in focus, on each keystroke;
## this makes it impossible to paste anything into a MaskedComboBox
## at the moment... :-(
## e) The other deficient behavior, likely induced by the workaround for (d),
## is that you can't shift-left to select more than one character
## at a time.
##
##
## 3. WS: Controls on wxPanels don't seem to pass Shift-WXK_TAB to their
## EVT_KEY_DOWN or EVT_CHAR event handlers. Until this is fixed in
## wxWindows, shift-tab won't take you backwards through the fields of
## a MaskedTextCtrl like it should. Until then Shifted arrow keys will
## work like shift-tab and tab ought to.
##
## To-Do's:
## =============================##
## 1. Add Popup list for auto-completable fields that simulates combobox on individual
## fields. Example: City validates against list of cities, or zip vs zip code list.
## 2. Allow optional monetary symbols (eg. $, pounds, etc.) at front of a "decimal"
## control.
## 3. Fix shift-left selection for MaskedComboBox.
## 5. Transform notion of "decimal control" to be less "entire control"-centric,
## so that monetary symbols can be included and still have the appropriate
## semantics. (Big job, as currently written, but would make control even
## more useful for business applications.)
## CHANGELOG:
## ====================
## Version 1.13
## 1. Added parameter option stopFieldChangeIfInvalid, which can be used to relax the
## validation rules for a control, but make best efforts to stop navigation out of
## that field should its current value be invalid. Note: this does not prevent the
## value from remaining invalid if focus for the control is lost, via mousing etc.
##
## Version 1.12
## 1. Added proper support for NUMPAD keypad keycodes for navigation and control.
##
## Version 1.11
## 1. Added value member to ValueError exceptions, so that people can catch them
## and then display their own errors, and added attribute raiseOnInvalidPaste,
## so one doesn't have to subclass the controls simply to force generation of
## a ValueError on a bad paste operation.
## 2. Fixed handling of unicode charsets by converting to explicit control char
## set testing for passing those keystrokes to the base control, and then
## changing the semantics of the * maskchar to indicate any visible char.
## 3. Added '|' mask specification character, which allows splitting of contiguous
## mask characters into separate fields, allowing finer control of behavior
## of a control.
##
##
## Version 1.10
## 1. Added handling for WXK_DELETE and WXK_INSERT, such that shift-delete
## cuts, shift-insert pastes, and ctrl-insert copies.
##
## Version 1.9
## 1. Now ignores kill focus events when being destroyed.
## 2. Added missing call to set insertion point on changing fields.
## 3. Modified SetKeyHandler() to accept None as means of removing one.
## 4. Fixed keyhandler processing for group and decimal character changes.
## 5. Fixed a problem that prevented input into the integer digit of a
## integerwidth=1 numctrl, if the current value was 0.
## 6. Fixed logic involving processing of "_signOk" flag, to remove default
## sign key handlers if false, so that SetAllowNegative(False) in the
## NumCtrl works properly.
## 7. Fixed selection logic for numeric controls so that if selectOnFieldEntry
## is true, and the integer portion of an integer format control is selected
## and the sign position is selected, the sign keys will always result in a
## negative value, rather than toggling the previous sign.
##
##
## Version 1.8
## 1. Fixed bug involving incorrect variable name, causing combobox autocomplete to fail.
## 2. Added proper support for unicode version of wxPython
## 3. Added * as mask char meaning "all ansi chars" (ordinals 32-255).
## 4. Converted doc strings to use reST format, for ePyDoc documentation.
## 5. Renamed helper functions, classes, etc. not intended to be visible in public
## interface to code.
##
## Version 1.7
## 1. Fixed intra-right-insert-field erase, such that it doesn't leave a hole, but instead
## shifts the text to the left accordingly.
## 2. Fixed _SetValue() to place cursor after last character inserted, rather than end of
## mask.
## 3. Fixed some incorrect undo behavior for right-insert fields, and allowed derived classes
## (eg. numctrl) to pass modified values for undo processing (to handle/ignore grouping
## chars properly.)
## 4. Fixed autoselect behavior to work similarly to (2) above, so that combobox
## selection will only select the non-empty text, as per request.
## 5. Fixed tabbing to work with 2.5.2 semantics.
## 6. Fixed size calculation to handle changing fonts
##
## Version 1.6
## 1. Reorganized masked controls into separate package, renamed things accordingly
## 2. Split actual controls out of this file into their own files.
## Version 1.5
## (Reported) bugs fixed:
## 1. Crash ensues if you attempt to change the mask of a read-only
## MaskedComboBox after initial construction.
## 2. Changed strategy of defining Get/Set property functions so that
## these are now generated dynamically at runtime, rather than as
## part of the class definition. (This makes it possible to have
## more general base classes that have many more options for configuration
## without requiring that derivations support the same options.)
## 3. Fixed IsModified for _Paste() and _OnErase().
##
## Enhancements:
## 1. Fixed "attribute function inheritance," since base control is more
## generic than subsequent derivations, not all property functions of a
## generic control should be exposed in those derivations. New strategy
## uses base control classes (eg. BaseMaskedTextCtrl) that should be
## used to derive new class types, and mixed with their own mixins to
## only expose those attributes from the generic masked controls that
## make sense for the derivation. (This makes Boa happier.)
## 2. Renamed (with b-c) MILTIME autoformats to 24HRTIME, so as to be less
## "parochial."
##
## Version 1.4
## (Reported) bugs fixed:
## 1. Right-click menu allowed "cut" operation that destroyed mask
## (was implemented by base control)
## 2. MaskedComboBox didn't allow .Append() of mixed-case values; all
## got converted to lower case.
## 3. MaskedComboBox selection didn't deal with spaces in values
## properly when autocompleting, and didn't have a concept of "next"
## match for handling choice list duplicates.
## 4. Size of MaskedComboBox was always default.
## 5. Email address regexp allowed some "non-standard" things, and wasn't
## general enough.
## 6. Couldn't easily reset MaskedComboBox contents programmatically.
## 7. Couldn't set emptyInvalid during construction.
## 8. Under some versions of wxPython, readonly comboboxes can apparently
## return a GetInsertionPoint() result (655535), causing masked control
## to fail.
## 9. Specifying an empty mask caused the controls to traceback.
## 10. Can't specify float ranges for validRange.
## 11. '.' from within a the static portion of a restricted IP address
## destroyed the mask from that point rightward; tab when cursor is
## before 1st field takes cursor past that field.
##
## Enhancements:
## 12. Added Ctrl-Z/Undo handling, (and implemented context-menu properly.)
## 13. Added auto-select option on char input for masked controls with
## choice lists.
## 14. Added '>' formatcode, allowing insert within a given or each field
## as appropriate, rather than requiring "overwrite". This makes single
## field controls that just have validation rules (eg. EMAIL) much more
## friendly. The same flag controls left shift when deleting vs just
## blanking the value, and for right-insert fields, allows right-insert
## at any non-blank (non-sign) position in the field.
## 15. Added option to use parentheses to indicate negative values for numeric controls.
## 16. Improved OnFocus handling of numeric controls.
## 17. Enhanced Home/End processing to allow operation on a field level,
## using ctrl key.
## 18. Added individual Get/Set functions for control parameters, for
## simplified integration with Boa Constructor.
## 19. Standardized "Colour" parameter names to match wxPython, with
## non-british spellings still supported for backward-compatibility.
## 20. Added '&' mask specification character for punctuation only (no letters
## or digits).
## 21. Added (in a separate file) wx.MaskedCtrl() factory function to provide
## unified interface to the masked edit subclasses.
##
##
## Version 1.3
## 1. Made it possible to configure grouping, decimal and shift-decimal characters,
## to make controls more usable internationally.
## 2. Added code to smart "adjust" value strings presented to .SetValue()
## for right-aligned numeric format controls if they are shorter than
## than the control width, prepending the missing portion, prepending control
## template left substring for the missing characters, so that setting
## numeric values is easier.
## 3. Renamed SetMaskParameters SetCtrlParameters() (with old name preserved
## for b-c), as this makes more sense.
##
## Version 1.2
## 1. Fixed .SetValue() to replace the current value, rather than the current
## selection. Also changed it to generate ValueError if presented with
## either a value which doesn't follow the format or won't fit. Also made
## set value adjust numeric and date controls as if user entered the value.
## Expanded doc explaining how SetValue() works.
## 2. Fixed EUDATE* autoformats, fixed IsDateType mask list, and added ability to
## use 3-char months for dates, and EUDATETIME, and EUDATEMILTIME autoformats.
## 3. Made all date autoformats automatically pick implied "datestyle".
## 4. Added IsModified override, since base wx.TextCtrl never reports modified if
## .SetValue used to change the value, which is what the masked edit controls
## use internally.
## 5. Fixed bug in date position adjustment on 2 to 4 digit date conversion when
## using tab to "leave field" and auto-adjust.
## 6. Fixed bug in _isCharAllowed() for negative number insertion on pastes,
## and bug in ._Paste() that didn't account for signs in signed masks either.
## 7. Fixed issues with _adjustPos for right-insert fields causing improper
## selection/replacement of values
## 8. Fixed _OnHome handler to properly handle extending current selection to
## beginning of control.
## 9. Exposed all (valid) autoformats to demo, binding descriptions to
## autoformats.
## 10. Fixed a couple of bugs in email regexp.
## 11. Made maskchardict an instance var, to make mask chars to be more
## amenable to international use.
## 12. Clarified meaning of '-' formatcode in doc.
## 13. Fixed a couple of coding bugs being flagged by Python2.1.
## 14. Fixed several issues with sign positioning, erasure and validity
## checking for "numeric" masked controls.
## 15. Added validation to IpAddrCtrl.SetValue().
##
## Version 1.1
## 1. Changed calling interface to use boolean "useFixedWidthFont" (True by default)
## vs. literal font facename, and use wxTELETYPE as the font family
## if so specified.
## 2. Switched to use of dbg module vs. locally defined version.
## 3. Revamped entire control structure to use Field classes to hold constraint
## and formatting data, to make code more hierarchical, allow for more
## sophisticated masked edit construction.
## 4. Better strategy for managing options, and better validation on keywords.
## 5. Added 'V' format code, which requires that in order for a character
## to be accepted, it must result in a string that passes the validRegex.
## 6. Added 'S' format code which means "select entire field when navigating
## to new field."
## 7. Added 'r' format code to allow "right-insert" fields. (implies 'R'--right-alignment)
## 8. Added '<' format code to allow fields to require explicit cursor movement
## to leave field.
## 9. Added validFunc option to other validation mechanisms, that allows derived
## classes to add dynamic validation constraints to the control.
## 10. Fixed bug in validatePaste code causing possible IndexErrors, and also
## fixed failure to obey case conversion codes when pasting.
## 11. Implemented '0' (zero-pad) formatting code, as it wasn't being done anywhere...
## 12. Removed condition from OnDecimalPoint, so that it always truncates right on '.'
## 13. Enhanced IpAddrCtrl to use right-insert fields, selection on field traversal,
## individual field validation to prevent field values > 255, and require explicit
## tab/. to change fields.
## 14. Added handler for left double-click to select field under cursor.
## 15. Fixed handling for "Read-only" styles.
## 16. Separated signedForegroundColor from 'R' style, and added foregroundColor
## attribute, for more consistent and controllable coloring.
## 17. Added retainFieldValidation parameter, allowing top-level constraints
## such as "validRequired" to be set independently of field-level equivalent.
## (needed in TimeCtrl for bounds constraints.)
## 18. Refactored code a bit, cleaned up and commented code more heavily, fixed
## some of the logic for setting/resetting parameters, eg. fillChar, defaultValue,
## etc.
## 19. Fixed maskchar setting for upper/lowercase, to work in all locales.
##
##
## Version 1.0
## 1. Decimal point behavior restored for decimal and integer type controls:
## decimal point now truncates the portion > 0.
## 2. Return key now works like the tab character and moves to the next field,
## provided no default button is set for the form panel on which the control
## resides.
## 3. Support added in _FindField() for subclasses controls (like timecontrol)
## to determine where the current insertion point is within the mask (i.e.
## which sub-'field'). See method documentation for more info and examples.
## 4. Added Field class and support for all constraints to be field-specific
## in addition to being globally settable for the control.
## Choices for each field are validated for length and pastability into
## the field in question, raising ValueError if not appropriate for the control.
## Also added selective additional validation based on individual field constraints.
## By default, SHIFT-WXK_DOWN, SHIFT-WXK_UP, WXK_PRIOR and WXK_NEXT all
## auto-complete fields with choice lists, supplying the 1st entry in
## the choice list if the field is empty, and cycling through the list in
## the appropriate direction if already a match. WXK_DOWN will also auto-
## complete if the field is partially completed and a match can be made.
## SHIFT-WXK_UP/DOWN will also take you to the next field after any
## auto-completion performed.
## 5. Added autoCompleteKeycodes=[] parameters for allowing further
## customization of the control. Any keycode supplied as a member
## of the _autoCompleteKeycodes list will be treated like WXK_NEXT. If
## requireFieldChoice is set, then a valid value from each non-empty
## choice list will be required for the value of the control to validate.
## 6. Fixed "auto-sizing" to be relative to the font actually used, rather
## than making assumptions about character width.
## 7. Fixed GetMaskParameter(), which was non-functional in previous version.
## 8. Fixed exceptions raised to provide info on which control had the error.
## 9. Fixed bug in choice management of MaskedComboBox.
## 10. Fixed bug in IpAddrCtrl causing traceback if field value was of
## the form '# #'. Modified control code for IpAddrCtrl so that '.'
## in the middle of a field clips the rest of that field, similar to
## decimal and integer controls.
##
##
## Version 0.0.7
## 1. "-" is a toggle for sign; "+" now changes - signed numerics to positive.
## 2. ',' in formatcodes now causes numeric values to be comma-delimited (e.g.333,333).
## 3. New support for selecting text within the control.(thanks Will Sadkin!)
## Shift-End and Shift-Home now select text as you would expect
## Control-Shift-End selects to the end of the mask string, even if value not entered.
## Control-A selects all *entered* text, Shift-Control-A selects everything in the control.
## 4. event.Skip() added to onKillFocus to correct remnants when running in Linux (contributed-
## for some reason I couldn't find the original email but thanks!!!)
## 5. All major key-handling code moved to their own methods for easier subclassing: OnHome,
## OnErase, OnEnd, OnCtrl_X, OnCtrl_A, etc.
## 6. Email and autoformat validations corrected using regex provided by Will Sadkin (thanks!).
## (The rest of the changes in this version were done by Will Sadkin with permission from Jeff...)
## 7. New mechanism for replacing default behavior for any given key, using
## ._SetKeycodeHandler(keycode, func) and ._SetKeyHandler(char, func) now available
## for easier subclassing of the control.
## 8. Reworked the delete logic, cut, paste and select/replace logic, as well as some bugs
## with insertion point/selection modification. Changed Ctrl-X to use standard "cut"
## semantics, erasing the selection, rather than erasing the entire control.
## 9. Added option for an "default value" (ie. the template) for use when a single fillChar
## is not desired in every position. Added IsDefault() function to mean "does the value
## equal the template?" and modified .IsEmpty() to mean "do all of the editable
## positions in the template == the fillChar?"
## 10. Extracted mask logic into mixin, so we can have both MaskedTextCtrl and MaskedComboBox,
## now included.
## 11. MaskedComboBox now adds the capability to validate from list of valid values.
## Example: City validates against list of cities, or zip vs zip code list.
## 12. Fixed oversight in EVT_TEXT handler that prevented the events from being
## passed to the next handler in the event chain, causing updates to the
## control to be invisible to the parent code.
## 13. Added IPADDR autoformat code, and subclass IpAddrCtrl for controlling tabbing within
## the control, that auto-reformats as you move between cells.
## 14. Mask characters [A,a,X,#] can now appear in the format string as literals, by using '\'.
## 15. It is now possible to specify repeating masks, e.g. #{3}-#{3}-#{14}
## 16. Fixed major bugs in date validation, due to the fact that
## wxDateTime.ParseDate is too liberal, and will accept any form that
## makes any kind of sense, regardless of the datestyle you specified
## for the control. Unfortunately, the strategy used to fix it only
## works for versions of wxPython post 2.3.3.1, as a C++ assert box
## seems to show up on an invalid date otherwise, instead of a catchable
## exception.
## 17. Enhanced date adjustment to automatically adjust heuristic based on
## current year, making last century/this century determination on
## 2-digit year based on distance between today's year and value;
## if > 50 year separation, assume last century (and don't assume last
## century is 20th.)
## 18. Added autoformats and support for including HHMMSS as well as HHMM for
##     date times, and added similar time, and military time autoformats.
## 19. Enhanced tabbing logic so that tab takes you to the next field if the
## control is a multi-field control.
## 20. Added stub method called whenever the control "changes fields", that
## can be overridden by subclasses (eg. IpAddrCtrl.)
## 21. Changed a lot of code to be more functionally-oriented so side-effects
## aren't as problematic when maintaining code and/or adding features.
## Eg: IsValid() now does not have side-effects; it merely reflects the
## validity of the value of the control; to determine validity AND recolor
## the control, _CheckValid() should be used with a value argument of None.
## Similarly, made most reformatting function take an optional candidate value
## rather than just using the current value of the control, and only
## have them change the value of the control if a candidate is not specified.
## In this way, you can do validation *before* changing the control.
## 22. Changed validRequired to mean "disallow chars that result in invalid
## value." (Old meaning now represented by emptyInvalid.) (This was
## possible once I'd made the changes in (19) above.)
## 23. Added .SetMaskParameters and .GetMaskParameter methods, so they
## can be set/modified/retrieved after construction. Removed individual
## parameter setting functions, in favor of this mechanism, so that
## all adjustment of the control based on changing parameter values can
## be handled in one place with unified mechanism.
## 24. Did a *lot* of testing and fixing re: numeric values. Added ability
## to type "grouping char" (ie. ',') and validate as appropriate.
## 25. Fixed ZIPPLUS4 to allow either 5 or 4, but if > 5 must be 9.
## 26. Fixed assumption about "decimal or integer" masks so that they're only
## made iff there's no validRegex associated with the field. (This
## is so things like zipcodes which look like integers can have more
## restrictive validation (ie. must be 5 digits.)
## 27. Added a ton more doc strings to explain use and derivation requirements
## and did regularization of the naming conventions.
## 28. Fixed a range bug in _adjustKey preventing z from being handled properly.
## 29. Changed behavior of '.' (and shift-.) in numeric controls to move to
## reformat the value and move the next field as appropriate. (shift-'.',
## ie. '>' moves to the previous field.
## Version 0.0.6
## 1. Fixed regex bug that caused autoformat AGE to invalidate any age ending
## in '0'.
## 2. New format character 'D' to trigger date type. If the user enters 2 digits in the
## year position, the control will expand the value to four digits, using numerals below
## 50 as 21st century (20+nn) and less than 50 as 20th century (19+nn).
## Also, new optional parameter datestyle = set to one of {MDY|DMY|YDM}
## 3. revalid parameter renamed validRegex to conform to standard for all validation
## parameters (see 2 new ones below).
## 4. New optional init parameter = validRange. Used only for int/dec (numeric) types.
## Allows the developer to specify a valid low/high range of values.
## 5. New optional init parameter = validList. Used for character types. Allows developer
## to send a list of values to the control to be used for specific validation.
## See the Last Name Only example - it is list restricted to Smith/Jones/Williams.
## 6. Date type fields now use wxDateTime's parser to validate the date and time.
## This works MUCH better than my kludgy regex!! Thanks to Robin Dunn for pointing
## me toward this solution!
## 7. Date fields now automatically expand 2-digit years when it can. For example,
## if the user types "03/10/67", then "67" will auto-expand to "1967". If a two-year
## date is entered it will be expanded in any case when the user tabs out of the
## field.
## 8. New class functions: SetValidBackgroundColor, SetInvalidBackgroundColor, SetEmptyBackgroundColor,
##     SetSignedForeColor allow access to override default class coloring behavior.
## 9. Documentation updated and improved.
## 10. Demo - page 2 is now a wxFrame class instead of a wxPyApp class. Works better.
## Two new options (checkboxes) - test highlight empty and disallow empty.
## 11. Home and End now work more intuitively, moving to the first and last user-entry
## value, respectively.
## 12. New class function: SetRequired(bool). Sets the control's entry required flag
## (i.e. disallow empty values if True).
##
## Version 0.0.5
## 1. get_plainValue method renamed to GetPlainValue following the wxWindows
## StudlyCaps(tm) standard (thanks Paul Moore). ;)
## 2. New format code 'F' causes the control to auto-fit (auto-size) itself
## based on the length of the mask template.
## 3. Class now supports "autoformat" codes. These can be passed to the class
## on instantiation using the parameter autoformat="code". If the code is in
## the dictionary, it will self set the mask, formatting, and validation string.
## I have included a number of samples, but I am hoping that someone out there
## can help me to define a whole bunch more.
## 4. I have added a second page to the demo (as well as a second demo class, test2)
## to showcase how autoformats work. The way they self-format and self-size is,
## I must say, pretty cool.
## 5. Comments added and some internal cosmetic revisions re: matching the code
## standards for class submission.
## 6. Regex validation is now done in real time - field turns yellow immediately
## and stays yellow until the entered value is valid
## 7. Cursor now skips over template characters in a more intuitive way (before the
## next keypress).
## 8. Change, Keypress and LostFocus methods added for convenience of subclasses.
## Developer may use these methods which will be called after EVT_TEXT, EVT_CHAR,
## and EVT_KILL_FOCUS, respectively.
## 9. Decimal and numeric handlers have been rewritten and now work more intuitively.
##
## Version 0.0.4
## 1. New .IsEmpty() method returns True if the control's value is equal to the
## blank template string
## 2. Control now supports a new init parameter: revalid. Pass a regular expression
## that the value will have to match when the control loses focus. If invalid,
## the control's BackgroundColor will turn yellow, and an internal flag is set (see next).
## 3. Demo now shows revalid functionality. Try entering a partial value, such as a
## partial social security number.
## 4. New .IsValid() value returns True if the control is empty, or if the value matches
## the revalid expression. If not, .IsValid() returns False.
## 5. Decimal values now collapse to decimal with '.00' on losefocus if the user never
## presses the decimal point.
## 6. Cursor now goes to the beginning of the field if the user clicks in an
## "empty" field intead of leaving the insertion point in the middle of the
## field.
## 7. New "N" mask type includes upper and lower chars plus digits. a-zA-Z0-9.
## 8. New formatcodes init parameter replaces other init params and adds functions.
## String passed to control on init controls:
## _ Allow spaces
## ! Force upper
## ^ Force lower
## R Show negative #s in red
## , Group digits
## - Signed numerals
## 0 Numeric fields get leading zeros
## 9. Ctrl-X in any field clears the current value.
## 10. Code refactored and made more modular (esp in OnChar method). Should be more
## easy to read and understand.
## 11. Demo enhanced.
## 12. Now has _doc_.
##
## Version 0.0.3
## 1. GetPlainValue() now returns the value without the template characters;
## so, for example, a social security number (123-33-1212) would return as
## 123331212; also removes white spaces from numeric/decimal values, so
## "- 955.32" is returned "-955.32". Press ctrl-S to see the plain value.
## 2. Press '.' in an integer style masked control and truncate any trailing digits.
## 3. Code moderately refactored. Internal names improved for clarity. Additional
## internal documentation.
## 4. Home and End keys now supported to move cursor to beginning or end of field.
## 5. Un-signed integers and decimals now supported.
## 6. Cosmetic improvements to the demo.
## 7. Class renamed to MaskedTextCtrl.
## 8. Can now specify include characters that will override the basic
## controls: for example, includeChars = "@." for email addresses
## 9. Added mask character 'C' -> allow any upper or lowercase character
## 10. .SetSignColor(str:color) sets the foreground color for negative values
## in signed controls (defaults to red)
## 11. Overview documentation written.
##
## Version 0.0.2
## 1. Tab now works properly when pressed in last position
## 2. Decimal types now work (e.g. #####.##)
## 3. Signed decimal or numeric values supported (i.e. negative numbers)
## 4. Negative decimal or numeric values now can show in red.
## 5. Can now specify an "exclude list" with the excludeChars parameter.
## See date/time formatted example - you can only enter A or P in the
## character mask space (i.e. AM/PM).
## 6. Backspace now works properly, including clearing data from a selected
## region but leaving template characters intact. Also delete key.
## 7. Left/right arrows now work properly.
## 8. Removed EventManager call from test so demo should work with wxPython 2.3.3
##
|
ibollen/repo | refs/heads/master | subcmds/abandon.py | 21 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import git
from progress import Progress
class Abandon(Command):
    """Subcommand that deletes a development branch across projects."""
    common = True
    helpSummary = "Permanently abandon a development branch"
    helpUsage = """
%prog <branchname> [<project>...]
This subcommand permanently abandons a development branch by
deleting it (and all its history) from your local repository.
It is equivalent to "git branch -D <branchname>".
"""

    def Execute(self, opt, args):
        """Validate the branch name, then abandon it in every requested
        project, exiting non-zero on an invalid name or any failure.
        """
        if not args:
            self.Usage()

        nb = args[0]
        if not git.check_ref_format('heads/%s' % nb):
            print >>sys.stderr, "error: '%s' is not a valid name" % nb
            sys.exit(1)

        # Projects where AbandonBranch failed (e.g. branch not present).
        failed = []
        # 'projects' instead of 'all': don't shadow the builtin.
        projects = self.GetProjects(args[1:])

        pm = Progress('Abandon %s' % nb, len(projects))
        for project in projects:
            pm.update()
            if not project.AbandonBranch(nb):
                failed.append(project)
        pm.end()

        if failed:
            if len(failed) == len(projects):
                # Nothing succeeded: the branch exists nowhere.
                print >>sys.stderr, 'error: no project has branch %s' % nb
            else:
                for p in failed:
                    print >>sys.stderr, \
                        "error: %s/: cannot abandon %s" \
                        % (p.relpath, nb)
            sys.exit(1)
|
general-ai-challenge/Round1 | refs/heads/master | src/tasks/competition/to_be_validated.py | 1 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.task import Task, on_start, on_message, on_sequence,\
on_state_changed, on_timeout, on_output_message
import tasks.competition.messages as msg
import random
import re
# global data structures to be called by multiple tasks
# properties of objects in two baskets, for memory tasks
# (please keep objects in alphabetical order for ease of debugging)
global_properties = {
'john': {
'apple':
['green', 'sour', 'hard', 'cheap', 'healthy', 'juicy',
'local'],
'banana':
['yellow', 'sweet', 'soft', 'cheap', 'healthy', 'exotic',
'ripe'],
'beet':
['red', 'dirty', 'hard', 'old', 'cheap', 'sweet', 'healthy',
'local', 'large'],
'carrot':
['orange', 'hard', 'fresh', 'local', 'healthy', 'sweet',
'crunchy'],
'cucumber':
['green', 'fresh', 'juicy', 'local', 'cheap', 'healthy',
'frozen', 'crunchy'],
'mango':
['brown', 'rotten'],
'onion':
['white', 'pungent', 'smelly', 'cheap', 'local', 'healthy'],
'pear':
['brown', 'sweet', 'dry', 'cheap', 'local', 'big'],
'pineapple':
['yellow', 'sweet', 'hard', 'exotic', 'brown', 'rough'],
'potato':
['yellow', 'old', 'cheap', 'hard', 'tasteless', 'dirty',
'bumpy'],
'tomato':
['red', 'soft', 'sour', 'juicy', 'local', 'cheap']},
'mary': {
'apple':
['red', 'sweet', 'hard', 'fresh', 'juicy', 'expensive',
'crunchy'],
'asparagus':
['white', 'soft', 'old', 'long', 'expensive', 'dirty'],
'avocado':
['green', 'ripe', 'exotic', 'expensive', 'large', 'healthy',
'smooth', 'buttery'],
'banana':
['yellow', 'tasteless', 'soft', 'sweet', 'old', 'exotic'],
'carrot':
['orange', 'hard', 'old', 'dirty', 'local', 'small', 'crunchy'],
'cucumber':
['green', 'fresh', 'hard', 'cheap', 'local', 'long'],
'onion':
['yellow', 'old', 'cheap', 'dry', 'local', 'large'],
'mango':
['red', 'green', 'yellow', 'juicy', 'sweet', 'expensive'],
'pear':
['green', 'tasteless', 'hard', 'local', 'cheap', 'big'],
'pineapple':
['yellow', 'sweet', 'dry', 'fresh', 'expensive', 'exotic'],
'tomato':
['red', 'soft', 'sour', 'local', 'cheap']}
}
# it's handy to have a reverse dictionary with the properties in the
# above lists as keys, and the objects as values
reverse_global_properties = {}
for basket in global_properties:
reverse_global_properties[basket] = {}
for object in global_properties[basket]:
for property in global_properties[basket][object]:
if property not in reverse_global_properties[basket]:
reverse_global_properties[basket][property] = []
reverse_global_properties[basket][property].append(object)
# a list of questions about a number, shared by multiple tasks
number_questions = ['please tell me the number.',
'what\'s the number?',
'what is the number?',
'can you tell me the number?']
class ItalianHowManyPropertiesDoesAnObjectHaveTask(Task):
    """Ask, in Italian, how many properties a given object has in one
    of the two baskets; the answer is accepted either in digits or (for
    counts up to ten) as an Italian number word, terminated by '.'.
    """

    def __init__(self):
        super(ItalianHowManyPropertiesDoesAnObjectHaveTask, self).__init__(
            max_time=3000)

    @on_start()
    def give_instructions(self, event):
        # English -> Italian names for every object in global_properties.
        italian_object_translations = {'apple': 'mela',
                                       'asparagus': 'asparago',
                                       'avocado': 'avocado',
                                       'banana': 'banana',
                                       'beet': 'rapa',
                                       'carrot': 'carota',
                                       'cucumber': 'cetriolo',
                                       'onion': 'cipolla',
                                       'pear': 'pera',
                                       'pineapple': 'ananas',
                                       'potato': 'patata',
                                       'tomato': 'pomodoro',
                                       'mango': 'mango'}
        # Italian number words indexed by value, 0 through 10.
        italian_numbers_in_words = ['zero', 'uno', 'due', 'tre', 'quattro',
                                    'cinque', 'sei', 'sette', 'otto', 'nove',
                                    'dieci']
        # random.choice requires a sequence: wrap the dict views in
        # list() so this also works under Python 3, where dict.keys()
        # is a non-indexable view (it happened to work under Python 2).
        basket = random.choice(list(global_properties.keys()))
        obj = random.choice(list(global_properties[basket].keys()))
        # counting properties of the selected object
        self.property_count = len(global_properties[basket][obj])
        # translating the object name to Italian for the question
        obj = italian_object_translations[obj]
        # alphabetic conversion only supported up to ten
        if self.property_count <= 10:
            self.alphabetic_property_count = \
                italian_numbers_in_words[self.property_count]
        else:
            self.alphabetic_property_count = ''
        message_string = "quante proprieta' ha " + obj + \
                         " nel cestino di " + basket + "?"
        self.set_message(message_string)
        self.instructions_completed = False

    @on_output_message(r"\?$")
    def check_ending(self, event):
        # The question has been fully delivered; start scoring answers.
        self.instructions_completed = True

    @on_message()
    def check_response(self, event):
        if not self.instructions_completed:
            self.ignore_last_char()
        elif ((event.message[-(len(str(self.property_count)) + 1):] ==
               (str(self.property_count) + '.')) or
              (len(self.alphabetic_property_count) > 0 and
               (event.message[-(len(self.alphabetic_property_count) + 1):] ==
                (self.alphabetic_property_count + '.')))):
            # Correct count, given either in digits or in words.
            italian_msg_congratulations = ['ottimo lavoro.',
                                           'bravo.',
                                           'congratulazioni.',
                                           'giusto.',
                                           'corretto.']
            self.set_result(1, random.choice(italian_msg_congratulations))

    @on_timeout()
    def give_away_answer(self, event):
        # Randomly pick the digit or the word version of the answer.
        formatted_count = str(self.property_count)
        # No choice if there is no alphabetic version, else flip a coin
        # to decide whether to return digit or string version.
        if (len(self.alphabetic_property_count) > 0 and
                random.randint(0, 1) == 1):
            formatted_count = self.alphabetic_property_count
        self.set_message("la risposta corretta e': " + formatted_count + ".")
class GuessTheNumberAskingQuestionsExplicitModelTask(Task):
    """Guess-the-number task in which the instructions spell out the
    exact help question; asking it reveals the whole target number.
    """

    def __init__(self, ):
        super(GuessTheNumberAskingQuestionsExplicitModelTask, self).__init__(
            max_time=3000)

    @on_start()
    def give_instructions(self, event):
        # Pick a length of 1-5 digits, then build the target one digit
        # at a time; the leading digit is never zero.
        self.digits = random.randint(1, 5)
        digit_chars = [str(random.randint(1, 9))]
        digit_chars.extend(str(random.randint(0, 9))
                           for _ in range(1, self.digits))
        self.target_number = ''.join(digit_chars)
        # Regexp matching any of the known help questions; '.' and '?'
        # are escaped so they match literally.
        escaped = [re.sub(r'([\.\?])', r'\\\1', q) for q in number_questions]
        self.re_query = re.compile(r".*(" + "|".join(escaped) + ")$")
        message_string = ("guess the " + str(self.digits) +
                          "-digit number I am thinking of; you can ask me: " +
                          random.choice(number_questions))
        self.set_message(message_string)
        self.instructions_completed = False

    @on_output_message(r"[\.\?]$")
    def check_ending(self, event):
        # The full instruction message has been delivered.
        self.instructions_completed = True

    @on_message()
    def check_response(self, event):
        if not self.instructions_completed:
            self.ignore_last_char()
        elif self.re_query.match(event.message):
            # The learner asked the help question: reveal the number.
            self.set_message(self.target_number + '.')
        elif event.message.endswith(self.target_number + '.'):
            self.set_result(1, random.choice(msg.congratulations))

    @on_timeout()
    def give_away_answer(self, event):
        self.set_message('if you asked: ' + random.choice(number_questions) +
                         ', I would have said: ' + self.target_number + '.')
class GuessTheNumberAskingQuestionsTask(Task):
    """Guess-the-number task where the learner is told only that it may
    'ask for the number'; asking reveals the whole target number.
    """

    def __init__(self):
        super(GuessTheNumberAskingQuestionsTask, self).__init__(
            max_time=3000)

    @on_start()
    def give_instructions(self, event):
        # Pick a length of 1-5 digits, then build the target one digit
        # at a time; the leading digit is never zero.
        self.digits = random.randint(1, 5)
        digit_chars = [str(random.randint(1, 9))]
        digit_chars.extend(str(random.randint(0, 9))
                           for _ in range(1, self.digits))
        self.target_number = ''.join(digit_chars)
        # Regexp matching any of the known help questions; '.' and '?'
        # are escaped so they match literally.
        escaped = [re.sub(r'([\.\?])', r'\\\1', q) for q in number_questions]
        self.re_query = re.compile(r".*(" + "|".join(escaped) + ")$")
        message_string = ("guess the " + str(self.digits) +
                          "-digit number I am thinking of; "
                          "you can ask me for the number.")
        self.set_message(message_string)
        self.instructions_completed = False

    @on_output_message(r"\.$")
    def check_ending(self, event):
        # The full instruction message has been delivered.
        self.instructions_completed = True

    @on_message()
    def check_response(self, event):
        if not self.instructions_completed:
            self.ignore_last_char()
        elif self.re_query.match(event.message):
            # The learner asked the help question: reveal the number.
            self.set_message(self.target_number + '.')
        elif event.message.endswith(self.target_number + '.'):
            self.set_result(1, random.choice(msg.congratulations))

    @on_timeout()
    def give_away_answer(self, event):
        self.set_message('if you asked: ' + random.choice(number_questions) +
                         ', I would have said: ' + self.target_number + '.')
class GuessTheNumberAskingForDigitsExplicitModelTask(Task):
    """Guess-the-number task revealed one digit at a time; the exact
    help question to use is spelled out in the instructions (the
    "explicit model" variant).
    """

    def __init__(self):
        super(GuessTheNumberAskingForDigitsExplicitModelTask, self).__init__(
            max_time=3500)

    @on_start()
    def give_instructions(self, event):
        # we need to edit the number_questions list by replacing
        # "number" with "next digit"; we will keep two versions of the
        # resulting list: one with just the relevant string replaced,
        # and one with escaped .? for the regular expression
        self.digit_questions = []
        escaped_digit_questions = []
        for question in number_questions:
            digit_question = re.sub('number', 'next digit', question)
            self.digit_questions.append(digit_question)
            escaped_digit_questions.append(
                re.sub(r'([\.\?])', r'\\\1', digit_question))
        # picking a random number of digits between 1 and 5
        self.digits = random.randint(1, 5)
        # generating a random number with that many digits; the first
        # digit shouldn't be 0, although this doesn't really matter for
        # our current purposes
        self.target_number = str(random.randint(1, 9))
        # this relies on range(1, n) being empty when n <= 1
        self.target_number += ''.join(
            ["%s" % random.randint(0, 9) for i in range(1, self.digits)])
        # preparing a regexp to capture requests for help
        self.re_query = re.compile(
            r".*(" + "|".join(escaped_digit_questions) + ")$")
        # also, we initialize a counter to keep track of the next digit
        self.next_digit = 0
        # preparing the message
        message_string = "guess the " + str(self.digits) + "-digit number I am thinking of; you can ask me: " + random.choice(self.digit_questions)
        self.set_message(message_string)
        self.instructions_completed = False

    @on_output_message(r"[\.\?]$")
    def check_ending(self, event):
        # The full instruction message (ending in '.' or '?') has been
        # delivered; answers are only scored from this point on.
        self.instructions_completed = True

    @on_message()
    def check_response(self, event):
        if not self.instructions_completed:
            self.ignore_last_char()
        elif self.re_query.match(event.message):
            # the learner asked for the next digit
            if (self.next_digit < self.digits):
                self.set_message(self.target_number[self.next_digit] + '.')
                self.next_digit += 1
            else:
                # every digit has already been revealed
                self.set_message('the number has only ' + str(self.digits) + ' digits.')
        elif event.message[-(self.digits + 1):] == (self.target_number + '.'):
            self.set_result(1, random.choice(msg.congratulations))

    @on_timeout()
    def give_away_answer(self, event):
        # Show what asking would have yielded (if any digits remain),
        # then reveal the full number.
        give_away_message = ''
        if (self.next_digit < (self.digits)):
            give_away_message += 'if you asked: ' + random.choice(self.digit_questions) + ', I would have said: ' + self.target_number[self.next_digit] + '. '
        give_away_message += 'the number is ' + self.target_number + '.'
        self.set_message(give_away_message)
class GuessTheNumberAskingForDigitsTask(Task):
    """Guess-the-number task revealed one digit at a time; the learner
    is only told it may 'ask for the next digit', without being given
    the exact wording of the help question.
    """

    def __init__(self):
        super(GuessTheNumberAskingForDigitsTask, self).__init__(
            max_time=3500)

    @on_start()
    def give_instructions(self, event):
        # we need to edit the number_questions list by replacing
        # "number" with "next digit"; we will keep two versions of the
        # resulting list: one with just the relevant string replaced,
        # and one with escaped .? for the regular expression
        self.digit_questions = []
        escaped_digit_questions = []
        for question in number_questions:
            digit_question = re.sub('number', 'next digit', question)
            self.digit_questions.append(digit_question)
            escaped_digit_questions.append(
                re.sub(r'([\.\?])', r'\\\1', digit_question))
        # picking a random number of digits between 1 and 5
        self.digits = random.randint(1, 5)
        # generating a random number with that many digits; the first
        # digit shouldn't be 0, although this doesn't really matter for
        # our current purposes
        self.target_number = str(random.randint(1, 9))
        # this relies on range(1, n) being empty when n <= 1
        self.target_number += ''.join(
            ["%s" % random.randint(0, 9) for i in range(1, self.digits)])
        # preparing a regexp to capture requests for help
        self.re_query = re.compile(
            r".*(" + "|".join(escaped_digit_questions) + ")$")
        # also, we initialize a counter to keep track of the next digit
        self.next_digit = 0
        # preparing the message
        message_string = "guess the " + str(self.digits) + "-digit number I am thinking of; you can ask me for the next digit."
        self.set_message(message_string)
        self.instructions_completed = False

    @on_output_message(r"\.$")
    def check_ending(self, event):
        # The full instruction message has been delivered; answers are
        # only scored from this point on.
        self.instructions_completed = True

    @on_message()
    def check_response(self, event):
        if not self.instructions_completed:
            self.ignore_last_char()
        elif self.re_query.match(event.message):
            # the learner asked for the next digit
            if (self.next_digit < self.digits):
                self.set_message(self.target_number[self.next_digit] + '.')
                self.next_digit += 1
            else:
                # every digit has already been revealed
                self.set_message('the number has only ' + str(self.digits) + ' digits.')
        elif event.message[-(self.digits + 1):] == (self.target_number + '.'):
            self.set_result(1, random.choice(msg.congratulations))

    @on_timeout()
    def give_away_answer(self, event):
        # Show what asking would have yielded (if any digits remain),
        # then reveal the full number.
        give_away_message = ''
        if (self.next_digit < (self.digits)):
            give_away_message += 'if you asked: ' + random.choice(self.digit_questions) + ', I would have said: ' + self.target_number[self.next_digit] + '. '
        give_away_message += 'the number is ' + self.target_number + '.'
        self.set_message(give_away_message)
# OLD STUFF FROM HERE
# here, I define a global character-by-character association list that
# can be used by the tasks below that rely on the same association
# scheme (here and below, global means: accessible by all tasks in
# this file; local means: accessible within one task only)
# we select a subset of characters as primes, so we can also define
# tasks with disjoint primes from within and without this list
# the following global variable tells us the size of this subset:
global_prime_cardinality = 5

# Shuffle the 26 letter indices and keep the first few as primes, then
# reshuffle and keep the same number as targets. Results are converted
# to tuples for compatibility with the local tables, which must be
# tuples.
alphabet_integers = list(range(0, 26))
random.shuffle(alphabet_integers)
global_primes = tuple(alphabet_integers[:global_prime_cardinality])
random.shuffle(alphabet_integers)
global_targets = tuple(alphabet_integers[:global_prime_cardinality])
# the following function returns instead matching primes and targets
# tuples, generating them each time it is called: it will be used by
# "local" tasks to generate their own mapping tables (note that
# objects returned are two TUPLES, as needed by the task classes):
def generate_local_prime_and_target_mappings(prime_cardinality):
    # Build two independent random mappings over the alphabet: shuffle
    # the 26 letter indices and keep the first `prime_cardinality` as
    # primes, then reshuffle and keep the same number as targets. Both
    # results are tuples, as required by the task classes.
    pool = list(range(0, 26))
    random.shuffle(pool)
    primes = tuple(pool[0:prime_cardinality])
    random.shuffle(pool)
    targets = tuple(pool[0:prime_cardinality])
    return ([primes, targets])
# the following function generates prime and target strings, according
# to the tables passed as arguments
def generate_prime_and_target(primes, targets, string_length, prime_cardinality):
    # Draw `string_length` random indices into the mapping tables, then
    # render the same index sequence through both tables, so the prime
    # and target strings correspond position by position.
    raw = [random.randint(0, (prime_cardinality - 1))
           for _ in range(string_length)]
    base = ord('a')
    prime = ''.join([chr(base + primes[i]) for i in raw])
    target = ''.join([chr(base + targets[i]) for i in raw])
    return ([prime, target])
# TASKS START HERE
class RepeatCharacter(Task):
    """Echo task: the environment sends one random lowercase letter
    followed by a period, and rewards the learner for repeating it.
    """

    def __init__(self):
        super(RepeatCharacter, self).__init__(
            max_time=500)

    @on_start()
    def give_instructions(self, event):
        # One random lowercase letter, terminated by a period.
        self.prime = chr(ord('a') + random.randint(0, 25)) + "."
        self.set_message(self.prime)
        self.instructions_completed = False

    @on_output_message(r"\.$")
    def check_ending(self, event):
        # The prime has been fully delivered.
        self.instructions_completed = True

    @on_message()
    def check_character(self, event):
        # Reward only once the prime is out and the learner echoes it.
        if self.instructions_completed and event.message.endswith(self.prime):
            self.set_result(1)
class RepeatStringMax4(Task):
    """Show a random string of 1-4 letters (terminated by '.') and reward an
    exact echo of the whole prompt."""
    def __init__(self):
        super(RepeatStringMax4, self).__init__(max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(1, 4)
        letters = [chr(ord('a') + random.randint(0, 25))
                   for _ in range(self.string_length)]
        self.prime = ''.join(letters) + "."
        self.set_message(self.prime)
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if self.instructions_completed and \
                event.message[-len(self.prime):] == self.prime:
            self.set_result(1)
class RepeatStringMin5Max10(Task):
    """Show a random string of 5-10 letters (plus '.') and reward an exact echo."""
    def __init__(self):
        super(RepeatStringMin5Max10, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(5, 10)
        self.prime = ""
        for i in range(self.string_length):
            self.prime += chr(ord('a') + random.randint(0, 25))
        self.prime += "."
        self.set_message(self.prime)
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        # fires once the trailing '.' of our own prompt has been emitted
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.prime):] == self.prime:
            # learner reproduced the whole prompt (including the period)
            self.set_result(1)
class GlobalTwoAssociatedCharacters(Task):
    """Present a one-letter prime immediately followed by its GLOBALLY
    associated target (e.g. "xq.") and reward reproduction of the target."""
    def __init__(self):
        super(GlobalTwoAssociatedCharacters, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        # one-letter prime/target pair from the shared global tables
        self.prime,self.target=generate_prime_and_target(global_primes,global_targets,1,global_prime_cardinality)
        self.set_message(self.prime+self.target+".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-2:] == self.target + ".":
            # learner produced the target letter plus terminator
            self.set_result(1)
class GlobalCharacterPrimeTarget(Task):
    """Show only a one-letter prime; reward the learner for answering with
    the GLOBALLY associated target letter (plus '.')."""
    def __init__(self):
        super(GlobalCharacterPrimeTarget, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.prime,self.target=generate_prime_and_target(global_primes,global_targets,1,global_prime_cardinality)
        # the expected answer includes the terminating period
        self.target += "."
        self.set_message(self.prime + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-2:] == self.target:
            self.set_result(1)
class LocalCharacterPrimeTarget(Task):
    """Like GlobalCharacterPrimeTarget, but the prime->target mapping tables
    are class-LOCAL: drawn once for this task, not shared with others."""
    # get local primes and targets
    primes,targets=generate_local_prime_and_target_mappings(global_prime_cardinality)
    # note that we use the same number of distinct primes as in the global
    # table, but they are not constrained to be the same (nor to be
    #disjoint)
    def __init__(self):
        super(LocalCharacterPrimeTarget, self).__init__(
            max_time=500)
        # debug
        # self.logger=logging.getLogger(__name__)
        # self.logger.debug("local primes " + str(self.primes))
        # self.logger.debug("local targets " + str(self.targets))
    @on_start()
    def give_instructions(self, event):
        # one-letter prime; the reward requires the locally associated target
        self.prime,self.target=generate_prime_and_target(self.primes,self.targets,1,global_prime_cardinality)
        self.target += "."
        self.set_message(self.prime + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-2:] == self.target:
            self.set_result(1)
class GlobalTwoAssociatedDelimitedStringsMax4(Task):
    """Show "<prime>#<target>." (1-4 letters each, global tables, '#' as a
    visible delimiter) and reward reproduction of the target string."""
    def __init__(self):
        super(GlobalTwoAssociatedDelimitedStringsMax4, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(1, 4)
        self.prime,self.target=generate_prime_and_target(global_primes,global_targets,self.string_length,global_prime_cardinality)
        self.set_message(self.prime + '#' + self.target + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.target):] == self.target:
            # note: checks only the target string, without a terminator
            self.set_result(1)
class GlobalTwoAssociatedStringsMax4(Task):
    """Like GlobalTwoAssociatedDelimitedStringsMax4 but with no '#' delimiter
    between prime and target in the prompt."""
    def __init__(self):
        super(GlobalTwoAssociatedStringsMax4, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(1, 4)
        self.prime,self.target=generate_prime_and_target(global_primes,global_targets,self.string_length,global_prime_cardinality)
        self.set_message(self.prime + self.target + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.target):] == self.target:
            self.set_result(1)
class LocalTwoAssociatedDelimitedStringsMax4(Task):
    """Show "<prime>#<target>." built from task-LOCAL mapping tables and
    reward reproduction of the 1-4 character target string.

    Bug fix: give_instructions assigned the drawn length to a local
    ``string_length`` variable but then read ``self.string_length``, which
    raised AttributeError on every episode; the length is now stored on the
    instance, matching every sibling task.
    """
    # for comments, see first Local task in this file
    # get local primes and targets
    primes,targets=generate_local_prime_and_target_mappings(global_prime_cardinality)
    def __init__(self):
        super(LocalTwoAssociatedDelimitedStringsMax4, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(1, 4)
        self.prime,self.target=generate_prime_and_target(self.primes,self.targets,
            self.string_length,global_prime_cardinality)
        self.set_message(self.prime + '#' + self.target + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.target):] == self.target:
            self.set_result(1)
class LocalTwoAssociatedStringsMax4(Task):
    """Show "<prime><target>." (no delimiter) built from task-LOCAL mapping
    tables and reward reproduction of the 1-4 character target string."""
    # for comments, see first Local task in this file
    # get local primes and targets
    primes,targets=generate_local_prime_and_target_mappings(global_prime_cardinality)
    def __init__(self):
        super(LocalTwoAssociatedStringsMax4, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(1, 4)
        self.prime,self.target=generate_prime_and_target(self.primes,self.targets,
            self.string_length,global_prime_cardinality)
        self.set_message(self.prime + self.target + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.target):] == self.target:
            self.set_result(1)
class GlobalStringPrimeTargetMax4(Task):
    """Show only a 1-4 letter prime (global tables); reward the learner for
    producing the associated target string terminated by '.'."""
    def __init__(self):
        super(GlobalStringPrimeTargetMax4, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(1, 4)
        self.prime,self.target=generate_prime_and_target(global_primes,global_targets,self.string_length,global_prime_cardinality)
        # expected answer includes the terminating period
        self.target += "."
        self.set_message(self.prime + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.target):] == self.target:
            self.set_result(1)
class LocalStringPrimeTargetMax4(Task):
    """Show only a 1-4 letter prime (task-LOCAL tables); reward the learner
    for producing the associated target string terminated by '.'."""
    # for comments, see first Local task in this file
    # get local primes and targets
    primes,targets=generate_local_prime_and_target_mappings(global_prime_cardinality)
    def __init__(self):
        super(LocalStringPrimeTargetMax4, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(1, 4)
        self.prime,self.target=generate_prime_and_target(self.primes,self.targets,
            self.string_length,global_prime_cardinality)
        self.target += "."
        self.set_message(self.prime + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.target):] == self.target:
            self.set_result(1)
class GlobalStringPrimeTargetMin5Max10(Task):
    """Show only a 5-10 letter prime (global tables); reward the learner for
    producing the associated target string terminated by '.'.

    Fix: __init__ demanded an ``env`` argument that no sibling task takes
    and that was never used.  It is now optional (and still ignored), which
    keeps any existing caller working while making the constructor
    consistent with the other task classes in this file.
    """
    def __init__(self, env=None):
        # env is accepted only for backward compatibility; it is unused
        super(GlobalStringPrimeTargetMin5Max10, self).__init__(
            max_time=500)
    @on_start()
    def give_instructions(self, event):
        self.string_length = random.randint(5, 10)
        self.prime,self.target=generate_prime_and_target(global_primes,global_targets,self.string_length,global_prime_cardinality)
        self.target += "."
        self.set_message(self.prime + ".")
        self.instructions_completed = False
    @on_output_message(r"\.$")
    def check_ending(self, event):
        self.instructions_completed = True
    @on_message()
    def check_character(self, event):
        if not self.instructions_completed:
            pass
        elif event.message[-len(self.target):] == self.target:
            self.set_result(1)
|
babyliynfg/cross | refs/heads/master | tools/project-creator/Python2.6.6/Lib/test/testcodec.py | 15 | """ Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless test codec backed by the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        # encoding_map is built at module level as the inverse of decoding_map
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    # stream wrapper; all encoding logic is inherited from Codec
    pass
class StreamReader(Codec,codecs.StreamReader):
    # stream wrapper; all decoding logic is inherited from Codec
    pass
### encodings module API
def getregentry():
    """Return the (encode, decode, streamreader, streamwriter) tuple used by
    the encodings-package codec registration protocol."""
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
# identity mapping for all 256 byte values, then a few special test entries
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x78: u"abc", # 1-n decoding mapping
    "abc": 0x0078,# 1-n encoding mapping
    0x01: None, # decoding mapping to <undefined>
    0x79: u"", # decoding mapping to <remove character>
})
### Encoding Map
# built as the inverse of the decoding map (value -> key)
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
|
buguelos/odoo | refs/heads/master | openerp/tools/pdf_utils.py | 456 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
fields names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
# FDF boilerplate emitted around the generated field list: file header,
# binary-marker comment line, and the opening of the /Fields array...
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
# ...and the matching close of the array/object plus trailer and EOF marker.
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
    """Encode *f* in the UTF-16BE-style form pdftk expects for FDF field
    names: a BOM followed by a NUL byte before every character."""
    encoded = "\xfe\xff"
    for ch in f:
        encoded += "\x00" + ch
    return encoded
def extract_keys(lines):
    """Collect form-field names from the /T entries of an FDF dump.

    /V (value) lines are ignored.  For each /T line the leading markup
    (first 7 characters) and trailing ")\\n" are stripped and the NUL
    padding bytes removed.
    """
    keys = []
    for line in lines:
        if line.startswith('/T'):
            raw = line[7:-2]
            keys.append(raw.replace('\x00', ''))
    return keys
def write_field(out, key, value):
    """Append one FDF field dictionary (/V value, /T encoded key) to *out*."""
    out.write("<<\n")
    if value:
        out.write("/V (%s)\n" %value)
    else:
        # empty value: emit an empty name object rather than an empty string
        out.write("/V /\n")
    out.write("/T (%s)\n" % output_field(key) )
    out.write(">> \n")
def write_fields(out, fields):
    """Write a complete FDF document for the *fields* mapping (key -> value)."""
    out.write(HEAD)
    for key in fields:
        value = fields[key]
        write_field(out, key, value)
        # write_field(out, key+"a", value) # pour copie-carbone sur autres pages
    out.write(TAIL)
def extract_keys_from_pdf(filename):
    """Return the form-field names of *filename* by asking pdftk to dump an
    FDF file and parsing its /T entries."""
    # what about using 'pdftk filename dump_data_fields' and parsing the output ?
    # NOTE(review): filename is interpolated unquoted into a shell command --
    # unsafe if it may contain spaces or shell metacharacters; consider
    # subprocess.run([...]) instead.
    tmp_file = tempfile.mkstemp(".fdf")[1]
    try:
        os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
        with open(tmp_file, "r") as ofile:
            lines = ofile.readlines()
    finally:
        # best-effort cleanup of the temporary FDF file
        try:
            os.remove(tmp_file)
        except Exception:
            pass # nothing to do
    return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
    """Fill the form of *infile* with the *fields* mapping via pdftk and
    write a flattened PDF to *outfile*."""
    # NOTE(review): infile/outfile are interpolated unquoted into a shell
    # command -- unsafe for paths containing spaces or shell metacharacters.
    tmp_file = tempfile.mkstemp(".fdf")[1]
    try:
        with open(tmp_file, "w") as ofile:
            write_fields(ofile, fields)
        os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
    finally:
        # best-effort cleanup of the temporary FDF file
        try:
            os.remove(tmp_file)
        except Exception:
            pass # nothing to do
def testfill_pdf(infile, outfile):
    """Fill every form field of *infile* with an empty value and write the
    result to *outfile* (smoke test for the extract/fill round trip).

    Bug fix: fill_pdf()/write_fields() iterate ``fields`` as a mapping
    (``fields[key]``), so the previous list-of-tuples argument raised
    TypeError; a dict of key -> empty value is built instead.
    """
    keys = extract_keys_from_pdf(infile)
    fields = dict((key, '') for key in keys)
    fill_pdf(infile, outfile, fields)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cuongnv23/ansible | refs/heads/devel | lib/ansible/modules/system/gluster_volume.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Taneli Leppä <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: gluster_volume
short_description: Manage GlusterFS volumes
description:
- Create, remove, start, stop and tune GlusterFS volumes
version_added: '1.9'
options:
name:
description:
- The volume name
required: true
state:
description:
- Use present/absent ensure if a volume exists or not.
Use started/stopped to control its availability.
required: true
choices: ['present', 'absent', 'started', 'stopped']
cluster:
description:
- List of hosts to use for probing and brick setup
host:
description:
- Override local hostname (for peer probing purposes)
replicas:
description:
- Replica count for volume
arbiter:
description:
- Arbiter count for volume
version_added: '2.3'
stripes:
description:
- Stripe count for volume
disperses:
description:
- Disperse count for volume
version_added: '2.2'
redundancies:
description:
- Redundancy count for volume
version_added: '2.2'
transport:
description:
- Transport type for volume
default: 'tcp'
choices: ['tcp', 'rdma', 'tcp,rdma']
bricks:
description:
- Brick paths on servers. Multiple brick paths can be separated by commas.
aliases: ['brick']
start_on_create:
description:
- Controls whether the volume is started after creation or not
default: 'yes'
type: bool
rebalance:
description:
- Controls whether the cluster is rebalanced after changes
default: 'no'
type: bool
directory:
description:
- Directory for limit-usage
options:
description:
- A dictionary/hash with options/settings for the volume
quota:
description:
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list)
force:
description:
- If brick is being created in the root partition, module will fail.
Set force to true to override this behaviour.
type: bool
notes:
- Requires cli tools for GlusterFS on servers
- Will add new bricks, but not remove them
author: Taneli Leppä (@rosmo)
"""
EXAMPLES = """
- name: create gluster volume
gluster_volume:
state: present
name: test1
bricks: /bricks/brick1/g1
rebalance: yes
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
- name: tune
gluster_volume:
state: present
name: test1
options:
performance.cache-size: 256MB
- name: start gluster volume
gluster_volume:
state: started
name: test1
- name: limit usage
gluster_volume:
state: present
name: test1
directory: /foo
quota: 20.0MB
- name: stop gluster volume
gluster_volume:
state: stopped
name: test1
- name: remove gluster volume
gluster_volume:
state: absent
name: test1
- name: create gluster volume with multiple bricks
gluster_volume:
state: present
name: test2
bricks: /bricks/brick1/g2,/bricks/brick2/g2
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
"""
import re
import socket
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
glusterbin = ''
def run_gluster(gargs, **kwargs):
    """Run ``gluster --mode=script <gargs>`` and return its stdout.

    Any non-zero exit or exception aborts the Ansible module via fail_json.
    """
    global glusterbin
    global module
    args = [glusterbin, '--mode=script']
    args.extend(gargs)
    try:
        rc, out, err = module.run_command(args, **kwargs)
        if rc != 0:
            module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
                             (' '.join(args), rc, out or err), exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
                         to_native(e)), exception=traceback.format_exc())
    return out
def run_gluster_nofail(gargs, **kwargs):
    """Run a gluster command and return its stdout, or None on non-zero exit
    (never fails the module)."""
    global glusterbin
    global module
    args = [glusterbin]
    args.extend(gargs)
    rc, out, err = module.run_command(args, **kwargs)
    if rc != 0:
        return None
    return out
def get_peers():
    """Parse ``gluster peer status`` into {hostname: [uuid, state]}.

    Hostnames listed under an "Other names:" section are recorded as extra
    aliases sharing the preceding peer's uuid/state.
    """
    out = run_gluster([ 'peer', 'status'])
    peers = {}
    hostname = None
    uuid = None
    state = None
    shortNames = False
    for row in out.split('\n'):
        if ': ' in row:
            key, value = row.split(': ')
            if key.lower() == 'hostname':
                hostname = value
                shortNames = False
            if key.lower() == 'uuid':
                uuid = value
            if key.lower() == 'state':
                # the State line closes one peer record
                state = value
                peers[hostname] = [uuid, state]
        elif row.lower() == 'other names:':
            shortNames = True
        elif row != '' and shortNames is True:
            # alias line inside the "Other names:" section
            peers[row] = [uuid, state]
        elif row == '':
            shortNames = False
    return peers
def get_volumes():
    """Parse ``gluster volume info`` into {volume_name: info_dict}.

    Each info_dict carries name/id/status/transport, the brick list
    (arbiter bricks also collected separately), reconfigured options, and a
    boolean 'quota' flag.
    """
    out = run_gluster([ 'volume', 'info' ])
    volumes = {}
    volume = {}
    for row in out.split('\n'):
        if ': ' in row:
            key, value = row.split(': ')
            if key.lower() == 'volume name':
                # a new volume record starts here
                volume['name'] = value
                volume['options'] = {}
                volume['quota'] = False
            if key.lower() == 'volume id':
                volume['id'] = value
            if key.lower() == 'status':
                volume['status'] = value
            if key.lower() == 'transport-type':
                volume['transport'] = value
            if value.lower().endswith(' (arbiter)'):
                if not 'arbiters' in volume:
                    volume['arbiters'] = []
                # strip the " (arbiter)" suffix before recording the brick
                value = value[:-10]
                volume['arbiters'].append(value)
            if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
                # "Brick1:", "Brick2:", ... lines (but not the "Bricks:" header)
                if not 'bricks' in volume:
                    volume['bricks'] = []
                volume['bricks'].append(value)
            # Volume options
            if '.' in key:
                if not 'options' in volume:
                    volume['options'] = {}
                volume['options'][key] = value
                if key == 'features.quota' and value == 'on':
                    volume['quota'] = True
        else:
            # a non-"key: value" row (usually blank) flushes the record
            if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
                if len(volume) > 0:
                    volumes[volume['name']] = volume
                volume = {}
    return volumes
def get_quotas(name, nofail):
    """Return {directory: quota-limit} for volume *name*.

    With nofail=True a failing ``gluster volume quota <name> list`` yields
    an empty dict instead of aborting the module.
    """
    quotas = {}
    if nofail:
        out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ])
        if not out:
            return quotas
    else:
        out = run_gluster([ 'volume', 'quota', name, 'list' ])
    for row in out.split('\n'):
        if row[:1] == '/':
            # fix: raw string so '\s' is a regex escape, not an invalid
            # string escape (DeprecationWarning on modern Python)
            q = re.split(r'\s+', row)
            quotas[q[0]] = q[1]
    return quotas
def wait_for_peer(host):
    """Poll peer status (up to 4 times, 1s apart) until *host* reports
    'Peer in Cluster'; return True on success, False on timeout."""
    for x in range(0, 4):
        peers = get_peers()
        if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
            return True
        time.sleep(1)
    return False
def probe(host, myhostname):
    """``gluster peer probe <host>`` and wait for it to join the cluster;
    probing localhost needs no wait."""
    global module
    out = run_gluster([ 'peer', 'probe', host ])
    if out.find('localhost') == -1 and not wait_for_peer(host):
        module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
def probe_all_peers(hosts, peers, myhostname):
    """Probe every host in *hosts* that is not already a known peer."""
    for host in hosts:
        host = host.strip() # Clean up any extra space for exact comparison
        if host not in peers:
            probe(host, myhostname)
def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
    """Build and run a ``gluster volume create`` command with the optional
    stripe/replica/arbiter/disperse/redundancy counts, one host:brick pair
    per (brick, host) combination, and an optional trailing 'force'."""
    args = [ 'volume', 'create' ]
    args.append(name)
    if stripe:
        args.append('stripe')
        args.append(str(stripe))
    if replica:
        args.append('replica')
        args.append(str(replica))
    if arbiter:
        args.append('arbiter')
        args.append(str(arbiter))
    if disperse:
        args.append('disperse')
        args.append(str(disperse))
    if redundancy:
        args.append('redundancy')
        args.append(str(redundancy))
    args.append('transport')
    args.append(transport)
    for brick in bricks:
        for host in hosts:
            args.append(('%s:%s' % (host, brick)))
    if force:
        args.append('force')
    run_gluster(args)
def start_volume(name):
    """``gluster volume start <name>``."""
    run_gluster([ 'volume', 'start', name ])
def stop_volume(name):
    """``gluster volume stop <name>``."""
    run_gluster([ 'volume', 'stop', name ])
def set_volume_option(name, option, parameter):
    """``gluster volume set <name> <option> <parameter>``."""
    run_gluster([ 'volume', 'set', name, option, parameter ])
def add_bricks(name, new_bricks, stripe, replica, force):
    """``gluster volume add-brick`` for *new_bricks*, with optional
    stripe/replica counts and trailing 'force'."""
    args = [ 'volume', 'add-brick', name ]
    if stripe:
        args.append('stripe')
        args.append(str(stripe))
    if replica:
        args.append('replica')
        args.append(str(replica))
    args.extend(new_bricks)
    if force:
        args.append('force')
    run_gluster(args)
def do_rebalance(name):
    """``gluster volume rebalance <name> start``."""
    run_gluster([ 'volume', 'rebalance', name, 'start' ])
def enable_quota(name):
    """``gluster volume quota <name> enable``."""
    run_gluster([ 'volume', 'quota', name, 'enable' ])
def set_quota(name, directory, value):
    """``gluster volume quota <name> limit-usage <directory> <value>``."""
    run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
def main():
    """Ansible entry point: create/delete/start/stop/tune GlusterFS volumes.

    Bug fixes:
    - the post-action existence check compared against the non-existent
      state 'delete' instead of 'absent', so ``state=absent`` on a volume
      that never existed failed instead of being an idempotent no-op;
    - an empty ``cluster`` list raised IndexError when the code probed
      ``cluster[0]``.
    """
    ### MAIN ###
    global module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['volume']),
            state=dict(required=True, choices=['present', 'absent', 'started', 'stopped']),
            cluster=dict(default=None, type='list'),
            host=dict(default=None),
            stripes=dict(default=None, type='int'),
            replicas=dict(default=None, type='int'),
            arbiters=dict(default=None, type='int'),
            disperses=dict(default=None, type='int'),
            redundancies=dict(default=None, type='int'),
            transport=dict(default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
            bricks=dict(default=None, aliases=['brick']),
            start_on_create=dict(default=True, type='bool'),
            rebalance=dict(default=False, type='bool'),
            options=dict(default={}, type='dict'),
            quota=dict(),
            directory=dict(default=None),
            force=dict(default=False, type='bool'),
        )
    )
    global glusterbin
    glusterbin = module.get_bin_path('gluster', True)
    changed = False
    action = module.params['state']
    volume_name = module.params['name']
    cluster= module.params['cluster']
    brick_paths = module.params['bricks']
    stripes = module.params['stripes']
    replicas = module.params['replicas']
    arbiters = module.params['arbiters']
    disperses = module.params['disperses']
    redundancies = module.params['redundancies']
    transport = module.params['transport']
    myhostname = module.params['host']
    start_on_create = module.boolean(module.params['start_on_create'])
    rebalance = module.boolean(module.params['rebalance'])
    force = module.boolean(module.params['force'])
    if not myhostname:
        myhostname = socket.gethostname()
    # Clean up if last element is empty. Consider that yml can look like this:
    #   cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
    if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
        cluster = cluster[0:-1]
    # fix: 'not cluster' also covers an empty list, which previously raised
    # IndexError on cluster[0]
    if not cluster or cluster[0] == '':
        cluster = [myhostname]
    if brick_paths is not None and "," in brick_paths:
        brick_paths = brick_paths.split(",")
    else:
        brick_paths = [brick_paths]
    options = module.params['options']
    quota = module.params['quota']
    directory = module.params['directory']
    # get current state info
    peers = get_peers()
    volumes = get_volumes()
    quotas = {}
    if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
        quotas = get_quotas(volume_name, True)
    # do the work!
    if action == 'absent':
        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'stopped':
                stop_volume(volume_name)
            run_gluster([ 'volume', 'delete', volume_name ])
            changed = True
    if action == 'present':
        probe_all_peers(cluster, peers, myhostname)
        # create if it doesn't exist
        if volume_name not in volumes:
            create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
            volumes = get_volumes()
            changed = True
        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
                start_volume(volume_name)
                changed = True
            # switch bricks
            new_bricks = []
            removed_bricks = []
            all_bricks = []
            for node in cluster:
                for brick_path in brick_paths:
                    brick = '%s:%s' % (node, brick_path)
                    all_bricks.append(brick)
                    if brick not in volumes[volume_name]['bricks']:
                        new_bricks.append(brick)
            # this module does not yet remove bricks, but we check those anyways
            for brick in volumes[volume_name]['bricks']:
                if brick not in all_bricks:
                    removed_bricks.append(brick)
            if new_bricks:
                add_bricks(volume_name, new_bricks, stripes, replicas, force)
                changed = True
            # handle quotas
            if quota:
                if not volumes[volume_name]['quota']:
                    enable_quota(volume_name)
                quotas = get_quotas(volume_name, False)
                if directory not in quotas or quotas[directory] != quota:
                    set_quota(volume_name, directory, quota)
                    changed = True
            # set options
            for option in options.keys():
                if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
                    set_volume_option(volume_name, option, options[option])
                    changed = True
        else:
            module.fail_json(msg='failed to create volume %s' % volume_name)
    # fix: the state is called 'absent', not 'delete'; with the old check,
    # state=absent on a volume that never existed failed here instead of
    # reporting ok/unchanged
    if action != 'absent' and volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)
    if action == 'started':
        if volumes[volume_name]['status'].lower() != 'started':
            start_volume(volume_name)
            changed = True
    if action == 'stopped':
        if volumes[volume_name]['status'].lower() != 'stopped':
            stop_volume(volume_name)
            changed = True
    if changed:
        volumes = get_volumes()
        if rebalance:
            do_rebalance(volume_name)
    facts = {}
    facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }
    module.exit_json(changed=changed, ansible_facts=facts)
|
OpenDataNode/ckanext-odn-pipeline | refs/heads/master | ckanext/internal_api/plugin.py | 1 | '''
Created on 9.2.2015
@author: mvi
'''
from ckan.common import _, c
import ckan.logic as logic
import ckan.plugins as plugins
from ckanext.model.pipelines import Pipelines
import urllib
import logging
import pylons.config as config
from ckan.model.user import User
import json
from ckan.controllers.package import PackageController
NotFound = logic.NotFound
get_action = logic.get_action
rdf_uri_template = config.get('odn.storage.rdf.uri.template', '')
token_from_cfg = config.get("ckan.auth.internal_api.token", None)
log = logging.getLogger('ckanext')
def check_and_bust(key, dict):
    """Raise NotFound unless *dict* contains a truthy value under *key*."""
    if dict.get(key):
        return
    raise NotFound("Key '{0}' was not found or has no value set.".format(key))
# ============= AUTHENTIFICATION =============
class MyUser(User):
    """User subclass adding lookup by database id (the base class User as
    used here is queried by name elsewhere)."""
    @classmethod
    def by_id(cls, id):
        # imported inside the method; presumably to avoid an import cycle
        # at module load time -- TODO confirm
        import ckan.model.meta as meta
        obj = meta.Session.query(User).autoflush(False)
        return obj.filter_by(id=id).first()
def change_auth_user(context, user_id):
    """Switch the acting CKAN user (in both *context* and pylons ``c``) to
    the user with database id *user_id*; no-op if user_id is falsy or the
    user cannot be found (auth state is cleared first in that case)."""
    if not user_id:
        return
    # clear any previously authenticated user before the lookup
    context['user'] = None
    context['auth_user_obj'] = None
    c.user = None
    c.userobj = None
    user = MyUser.by_id(user_id)
    if user:
        log.debug('internal_api: Setting user to {username}'.format(username=user.name))
        context['user'] = user.name
        context['auth_user_obj'] = user
        c.user = user.name
        c.userobj = user
def internal_api_auth(context, data_dict=None):
    """CKAN auth function: grant access only when data_dict['token'] equals
    the configured ``ckan.auth.internal_api.token`` value."""
    check_and_bust('token', data_dict)
    token = data_dict['token']
    if not token or token != token_from_cfg:
        return {'success': False, 'msg': _('internal api: Authentication failed.')}
    return {'success': True }
# ============= RESOURCE CREATE logic =============
# data_dict = {
# 'action':'resource_create',
# 'pipeline_id': 11, (optional)
# 'user_id': 'CKAN USER ID', (optional)
# 'token': 'token',
# 'type': 'RDF', (optional)
# 'value': 'specific value for type', (optional)
# 'data': {} (optional)
# }
def internal_api(context, data_dict=None):
    """Dispatch an internal API call to a CKAN action.

    Expects in data_dict: 'action' (CKAN action name), 'user_id' (acting
    user), 'token' (shared secret, checked via internal_api auth), and
    optionally 'pipeline_id', 'type'/'value' (RDF storage reference),
    'upload' (file payload) and 'data' (action payload, dict or JSON string).
    """
    check_and_bust('action', data_dict)
    check_and_bust('user_id', data_dict)
    user_id = data_dict.get('user_id', None)
    change_auth_user(context, user_id)
    log.debug('internal_api: action = {0}'.format(data_dict['action']))
    log.debug('internal_api: user_id = {0}'.format(user_id))
    logic.check_access('internal_api', context, data_dict)
    action = data_dict['action']
    data = data_dict.get('data', {})
    if isinstance(data, basestring):
        # if upload data is actually a string
        data = json.loads(data)
    if 'resource_download' in action:
        # downloads are handled specially (must become a GET request)
        return resource_download(context, data)
    # type == 'FILE'
    if data_dict.has_key('upload'):
        data['upload'] = data_dict['upload']
        data['url'] = ''
    # any type
    if data_dict.has_key('pipeline_id') and data_dict['pipeline_id']:
        log.debug('internal_api: pipeline_id = {0}'.format(data_dict['pipeline_id']))
        pipeline_id = data_dict['pipeline_id']
        dataset_to_pipeline = Pipelines.by_pipeline_id(pipeline_id)
        if dataset_to_pipeline:
            # converting pipeline_id to 'id' or 'package_id'
            if action in ['resource_create']:
                data['package_id'] = dataset_to_pipeline.package_id
            elif action in ['package_update', 'package_show']:
                data['id'] = dataset_to_pipeline.package_id
        else:
            raise NotFound('No dataset found for pipeline_id = {0}'.format(pipeline_id))
    # type == 'RDF'
    if data_dict.has_key('type') and data_dict['type'] == 'RDF':
        data['url'] = get_rdf_url(data_dict)
    return get_action(action)(context, data)
def resource_download(context, data):
    """Stream a resource file back to the caller through the standard
    package controller download handler."""
    check_and_bust('package_id', data)
    check_and_bust('id', data)
    # changing POST request to GET request
    # needed because only GET request can return file
    plugins.toolkit.request.environ['REQUEST_METHOD'] = 'GET'
    package_id = data.get('package_id')
    resource_id = data.get('id')
    # NOTE(review): rsc/pkg are unused; presumably these get_action calls
    # exist to validate existence/authorization before downloading -- confirm
    rsc = get_action('resource_show')(context, {'id': resource_id})
    pkg = get_action('package_show')(context, {'id': package_id})
    return PackageController().resource_download(package_id, resource_id)
def get_rdf_url(data_dict):
    """Build the RDF storage URL for data_dict['value'] by filling the
    configured ``odn.storage.rdf.uri.template`` and percent-quoting it."""
    check_and_bust('value', data_dict)
    storage_id = str(data_dict['value'])
    filled = rdf_uri_template.replace('{storage_id}', storage_id)
    # escaping 'wrong' characters
    return urllib.quote(filled, safe="%/:=&?~#+!$,;'@()*[]")
# ============= PLUGIN =============
class InternalApiPlugin(plugins.SingletonPlugin):
    """CKAN plugin registering the internal_api action and its auth function."""
    plugins.implements(plugins.IAuthFunctions)
    plugins.implements(plugins.IActions)
    def get_auth_functions(self):
        # IAuthFunctions: name -> auth function
        return {'internal_api': internal_api_auth}
    def get_actions(self):
        # IActions: name -> action function
        return {'internal_api': internal_api}
igemsoftware/SYSU-Software2013 | refs/heads/master | project/Python27/Lib/site-packages/win32comext/adsi/adsicon.py | 29 | ADS_ATTR_CLEAR = ( 1 )
# Operation codes for attribute modification (ADS_ATTR_*).
ADS_ATTR_UPDATE = ( 2 )
ADS_ATTR_APPEND = ( 3 )
ADS_ATTR_DELETE = ( 4 )
# ADSI extension dispatch-id range and initialization notifications (ADS_EXT_*).
ADS_EXT_MINEXTDISPID = ( 1 )
ADS_EXT_MAXEXTDISPID = ( 16777215 )
ADS_EXT_INITCREDENTIALS = ( 1 )
ADS_EXT_INITIALIZE_COMPLETE = ( 2 )
# Search preference identifiers (ADS_SEARCHPREF_*).
ADS_SEARCHPREF_ASYNCHRONOUS = 0
ADS_SEARCHPREF_DEREF_ALIASES = 1
ADS_SEARCHPREF_SIZE_LIMIT = 2
ADS_SEARCHPREF_TIME_LIMIT = 3
ADS_SEARCHPREF_ATTRIBTYPES_ONLY = 4
ADS_SEARCHPREF_SEARCH_SCOPE = 5
ADS_SEARCHPREF_TIMEOUT = 6
ADS_SEARCHPREF_PAGESIZE = 7
ADS_SEARCHPREF_PAGED_TIME_LIMIT = 8
ADS_SEARCHPREF_CHASE_REFERRALS = 9
ADS_SEARCHPREF_SORT_ON = 10
ADS_SEARCHPREF_CACHE_RESULTS = 11
ADS_SEARCHPREF_DIRSYNC = 12
ADS_SEARCHPREF_TOMBSTONE = 13
# Search depth for directory queries (ADS_SCOPE_*).
ADS_SCOPE_BASE = 0
ADS_SCOPE_ONELEVEL = 1
ADS_SCOPE_SUBTREE = 2
# Authentication flags for binding (ADS_*).
# Note: ADS_USE_ENCRYPTION and ADS_USE_SSL deliberately share the value 0x2.
ADS_SECURE_AUTHENTICATION = 0x1
ADS_USE_ENCRYPTION = 0x2
ADS_USE_SSL = 0x2
ADS_READONLY_SERVER = 0x4
ADS_PROMPT_CREDENTIALS = 0x8
ADS_NO_AUTHENTICATION = 0x10
ADS_FAST_BIND = 0x20
ADS_USE_SIGNING = 0x40
ADS_USE_SEALING = 0x80
ADS_USE_DELEGATION = 0x100
ADS_SERVER_BIND = 0x200
# ADSI attribute value data types (ADSTYPE_*); each entry is one greater
# than its predecessor, mirroring the original C enum declaration.
ADSTYPE_INVALID = 0
ADSTYPE_DN_STRING = ADSTYPE_INVALID + 1
ADSTYPE_CASE_EXACT_STRING = ADSTYPE_DN_STRING + 1
ADSTYPE_CASE_IGNORE_STRING = ADSTYPE_CASE_EXACT_STRING + 1
ADSTYPE_PRINTABLE_STRING = ADSTYPE_CASE_IGNORE_STRING + 1
ADSTYPE_NUMERIC_STRING = ADSTYPE_PRINTABLE_STRING + 1
ADSTYPE_BOOLEAN = ADSTYPE_NUMERIC_STRING + 1
ADSTYPE_INTEGER = ADSTYPE_BOOLEAN + 1
ADSTYPE_OCTET_STRING = ADSTYPE_INTEGER + 1
ADSTYPE_UTC_TIME = ADSTYPE_OCTET_STRING + 1
ADSTYPE_LARGE_INTEGER = ADSTYPE_UTC_TIME + 1
ADSTYPE_PROV_SPECIFIC = ADSTYPE_LARGE_INTEGER + 1
ADSTYPE_OBJECT_CLASS = ADSTYPE_PROV_SPECIFIC + 1
ADSTYPE_CASEIGNORE_LIST = ADSTYPE_OBJECT_CLASS + 1
ADSTYPE_OCTET_LIST = ADSTYPE_CASEIGNORE_LIST + 1
ADSTYPE_PATH = ADSTYPE_OCTET_LIST + 1
ADSTYPE_POSTALADDRESS = ADSTYPE_PATH + 1
ADSTYPE_TIMESTAMP = ADSTYPE_POSTALADDRESS + 1
ADSTYPE_BACKLINK = ADSTYPE_TIMESTAMP + 1
ADSTYPE_TYPEDNAME = ADSTYPE_BACKLINK + 1
ADSTYPE_HOLD = ADSTYPE_TYPEDNAME + 1
ADSTYPE_NETADDRESS = ADSTYPE_HOLD + 1
ADSTYPE_REPLICAPOINTER = ADSTYPE_NETADDRESS + 1
ADSTYPE_FAXNUMBER = ADSTYPE_REPLICAPOINTER + 1
ADSTYPE_EMAIL = ADSTYPE_FAXNUMBER + 1
ADSTYPE_NT_SECURITY_DESCRIPTOR = ADSTYPE_EMAIL + 1
ADSTYPE_UNKNOWN = ADSTYPE_NT_SECURITY_DESCRIPTOR + 1
ADSTYPE_DN_WITH_BINARY = ADSTYPE_UNKNOWN + 1
ADSTYPE_DN_WITH_STRING = ADSTYPE_DN_WITH_BINARY + 1
# Operation codes for property modification (ADS_PROPERTY_*).
ADS_PROPERTY_CLEAR = 1
ADS_PROPERTY_UPDATE = 2
ADS_PROPERTY_APPEND = 3
ADS_PROPERTY_DELETE = 4
# systemFlags attribute bits; -2147483648 is 0x80000000 expressed as a
# signed 32-bit value (as emitted by the h2py conversion).
ADS_SYSTEMFLAG_DISALLOW_DELETE = -2147483648
ADS_SYSTEMFLAG_CONFIG_ALLOW_RENAME = 0x40000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_MOVE = 0x20000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_LIMITED_MOVE = 0x10000000
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_RENAME = -2147483648
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_MOVE = 0x4000000
ADS_SYSTEMFLAG_CR_NTDS_NC = 0x1
ADS_SYSTEMFLAG_CR_NTDS_DOMAIN = 0x2
ADS_SYSTEMFLAG_ATTR_NOT_REPLICATED = 0x1
ADS_SYSTEMFLAG_ATTR_IS_CONSTRUCTED = 0x4
# Group type bits (ADS_GROUP_TYPE_*).
ADS_GROUP_TYPE_GLOBAL_GROUP = 0x2
ADS_GROUP_TYPE_DOMAIN_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_UNIVERSAL_GROUP = 0x8
ADS_GROUP_TYPE_SECURITY_ENABLED = -2147483648
# userAccountControl bit flags (ADS_UF_*).
ADS_UF_SCRIPT = 0x1
ADS_UF_ACCOUNTDISABLE = 0x2
ADS_UF_HOMEDIR_REQUIRED = 0x8
ADS_UF_LOCKOUT = 0x10
ADS_UF_PASSWD_NOTREQD = 0x20
ADS_UF_PASSWD_CANT_CHANGE = 0x40
ADS_UF_ENCRYPTED_TEXT_PASSWORD_ALLOWED = 0x80
ADS_UF_TEMP_DUPLICATE_ACCOUNT = 0x100
ADS_UF_NORMAL_ACCOUNT = 0x200
ADS_UF_INTERDOMAIN_TRUST_ACCOUNT = 0x800
ADS_UF_WORKSTATION_TRUST_ACCOUNT = 0x1000
ADS_UF_SERVER_TRUST_ACCOUNT = 0x2000
ADS_UF_DONT_EXPIRE_PASSWD = 0x10000
ADS_UF_MNS_LOGON_ACCOUNT = 0x20000
ADS_UF_SMARTCARD_REQUIRED = 0x40000
ADS_UF_TRUSTED_FOR_DELEGATION = 0x80000
ADS_UF_NOT_DELEGATED = 0x100000
ADS_UF_USE_DES_KEY_ONLY = 0x200000
ADS_UF_DONT_REQUIRE_PREAUTH = 0x400000
ADS_UF_PASSWORD_EXPIRED = 0x800000
ADS_UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION = 0x1000000
# Access-right bits (ADS_RIGHT_*); ADS_RIGHT_GENERIC_READ is 0x80000000
# expressed as a signed 32-bit value.
ADS_RIGHT_DELETE = 0x10000
ADS_RIGHT_READ_CONTROL = 0x20000
ADS_RIGHT_WRITE_DAC = 0x40000
ADS_RIGHT_WRITE_OWNER = 0x80000
ADS_RIGHT_SYNCHRONIZE = 0x100000
ADS_RIGHT_ACCESS_SYSTEM_SECURITY = 0x1000000
ADS_RIGHT_GENERIC_READ = -2147483648
ADS_RIGHT_GENERIC_WRITE = 0x40000000
ADS_RIGHT_GENERIC_EXECUTE = 0x20000000
ADS_RIGHT_GENERIC_ALL = 0x10000000
ADS_RIGHT_DS_CREATE_CHILD = 0x1
ADS_RIGHT_DS_DELETE_CHILD = 0x2
ADS_RIGHT_ACTRL_DS_LIST = 0x4
ADS_RIGHT_DS_SELF = 0x8
ADS_RIGHT_DS_READ_PROP = 0x10
ADS_RIGHT_DS_WRITE_PROP = 0x20
ADS_RIGHT_DS_DELETE_TREE = 0x40
ADS_RIGHT_DS_LIST_OBJECT = 0x80
ADS_RIGHT_DS_CONTROL_ACCESS = 0x100
# ACE types (ADS_ACETYPE_*).
ADS_ACETYPE_ACCESS_ALLOWED = 0
ADS_ACETYPE_ACCESS_DENIED = 0x1
ADS_ACETYPE_SYSTEM_AUDIT = 0x2
ADS_ACETYPE_ACCESS_ALLOWED_OBJECT = 0x5
ADS_ACETYPE_ACCESS_DENIED_OBJECT = 0x6
ADS_ACETYPE_SYSTEM_AUDIT_OBJECT = 0x7
ADS_ACETYPE_SYSTEM_ALARM_OBJECT = 0x8
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK = 0x9
ADS_ACETYPE_ACCESS_DENIED_CALLBACK = 0xa
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK_OBJECT = 0xb
ADS_ACETYPE_ACCESS_DENIED_CALLBACK_OBJECT = 0xc
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK = 0xd
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK = 0xe
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK_OBJECT = 0xf
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK_OBJECT = 0x10
# ACE inheritance/audit flags (ADS_ACEFLAG_*).
ADS_ACEFLAG_INHERIT_ACE = 0x2
ADS_ACEFLAG_NO_PROPAGATE_INHERIT_ACE = 0x4
ADS_ACEFLAG_INHERIT_ONLY_ACE = 0x8
ADS_ACEFLAG_INHERITED_ACE = 0x10
ADS_ACEFLAG_VALID_INHERIT_FLAGS = 0x1f
ADS_ACEFLAG_SUCCESSFUL_ACCESS = 0x40
ADS_ACEFLAG_FAILED_ACCESS = 0x80
# Object-type presence flags for object ACEs (ADS_FLAG_*).
ADS_FLAG_OBJECT_TYPE_PRESENT = 0x1
ADS_FLAG_INHERITED_OBJECT_TYPE_PRESENT = 0x2
# Security-descriptor control bits (ADS_SD_CONTROL_*).
ADS_SD_CONTROL_SE_OWNER_DEFAULTED = 0x1
ADS_SD_CONTROL_SE_GROUP_DEFAULTED = 0x2
ADS_SD_CONTROL_SE_DACL_PRESENT = 0x4
ADS_SD_CONTROL_SE_DACL_DEFAULTED = 0x8
ADS_SD_CONTROL_SE_SACL_PRESENT = 0x10
ADS_SD_CONTROL_SE_SACL_DEFAULTED = 0x20
ADS_SD_CONTROL_SE_DACL_AUTO_INHERIT_REQ = 0x100
ADS_SD_CONTROL_SE_SACL_AUTO_INHERIT_REQ = 0x200
ADS_SD_CONTROL_SE_DACL_AUTO_INHERITED = 0x400
ADS_SD_CONTROL_SE_SACL_AUTO_INHERITED = 0x800
ADS_SD_CONTROL_SE_DACL_PROTECTED = 0x1000
ADS_SD_CONTROL_SE_SACL_PROTECTED = 0x2000
ADS_SD_CONTROL_SE_SELF_RELATIVE = 0x8000
# Security-descriptor revision for directory services.
ADS_SD_REVISION_DS = 4
# Name formats for name translation (ADS_NAME_TYPE_*).
ADS_NAME_TYPE_1779 = 1
ADS_NAME_TYPE_CANONICAL = 2
ADS_NAME_TYPE_NT4 = 3
ADS_NAME_TYPE_DISPLAY = 4
ADS_NAME_TYPE_DOMAIN_SIMPLE = 5
ADS_NAME_TYPE_ENTERPRISE_SIMPLE = 6
ADS_NAME_TYPE_GUID = 7
ADS_NAME_TYPE_UNKNOWN = 8
ADS_NAME_TYPE_USER_PRINCIPAL_NAME = 9
ADS_NAME_TYPE_CANONICAL_EX = 10
ADS_NAME_TYPE_SERVICE_PRINCIPAL_NAME = 11
ADS_NAME_TYPE_SID_OR_SID_HISTORY_NAME = 12
# Initialization types for name translation (ADS_NAME_INITTYPE_*).
ADS_NAME_INITTYPE_DOMAIN = 1
ADS_NAME_INITTYPE_SERVER = 2
ADS_NAME_INITTYPE_GC = 3
# Object option identifiers (ADS_OPTION_*); values form a 0-based enum.
ADS_OPTION_SERVERNAME = 0
ADS_OPTION_REFERRALS = ADS_OPTION_SERVERNAME + 1
ADS_OPTION_PAGE_SIZE = ADS_OPTION_REFERRALS + 1
ADS_OPTION_SECURITY_MASK = ADS_OPTION_PAGE_SIZE + 1
ADS_OPTION_MUTUAL_AUTH_STATUS = ADS_OPTION_SECURITY_MASK + 1
ADS_OPTION_QUOTA = ADS_OPTION_MUTUAL_AUTH_STATUS + 1
ADS_OPTION_PASSWORD_PORTNUMBER = ADS_OPTION_QUOTA + 1
ADS_OPTION_PASSWORD_METHOD = ADS_OPTION_PASSWORD_PORTNUMBER + 1
# Security-descriptor component selectors (ADS_SECURITY_INFO_*).
ADS_SECURITY_INFO_OWNER = 0x1
ADS_SECURITY_INFO_GROUP = 0x2
ADS_SECURITY_INFO_DACL = 0x4
ADS_SECURITY_INFO_SACL = 0x8
# Pathname set types (ADS_SETTYPE_*).
ADS_SETTYPE_FULL = 1
ADS_SETTYPE_PROVIDER = 2
ADS_SETTYPE_SERVER = 3
ADS_SETTYPE_DN = 4
# Pathname retrieval formats (ADS_FORMAT_*).
ADS_FORMAT_WINDOWS = 1
ADS_FORMAT_WINDOWS_NO_SERVER = 2
ADS_FORMAT_WINDOWS_DN = 3
ADS_FORMAT_WINDOWS_PARENT = 4
ADS_FORMAT_X500 = 5
ADS_FORMAT_X500_NO_SERVER = 6
ADS_FORMAT_X500_DN = 7
ADS_FORMAT_X500_PARENT = 8
ADS_FORMAT_SERVER = 9
ADS_FORMAT_PROVIDER = 10
ADS_FORMAT_LEAF = 11
# Pathname display and escaping modes (ADS_DISPLAY_* / ADS_ESCAPEDMODE_*).
ADS_DISPLAY_FULL = 1
ADS_DISPLAY_VALUE_ONLY = 2
ADS_ESCAPEDMODE_DEFAULT = 1
ADS_ESCAPEDMODE_ON = 2
ADS_ESCAPEDMODE_OFF = 3
ADS_ESCAPEDMODE_OFF_EX = 4
# Path types (ADS_PATH_*) and security-descriptor formats (ADS_SD_FORMAT_*).
ADS_PATH_FILE = 1
ADS_PATH_FILESHARE = 2
ADS_PATH_REGISTRY = 3
ADS_SD_FORMAT_IID = 1
ADS_SD_FORMAT_RAW = 2
ADS_SD_FORMAT_HEXSTRING = 3
# Generated by h2py from AdsErr.h
def _HRESULT_TYPEDEF_(_sc): return _sc
# HRESULT error/success codes from AdsErr.h; the negative literals are
# 0x8000xxxx HRESULTs expressed as signed 32-bit integers.
E_ADS_BAD_PATHNAME = _HRESULT_TYPEDEF_((-2147463168))
E_ADS_INVALID_DOMAIN_OBJECT = _HRESULT_TYPEDEF_((-2147463167))
E_ADS_INVALID_USER_OBJECT = _HRESULT_TYPEDEF_((-2147463166))
E_ADS_INVALID_COMPUTER_OBJECT = _HRESULT_TYPEDEF_((-2147463165))
E_ADS_UNKNOWN_OBJECT = _HRESULT_TYPEDEF_((-2147463164))
E_ADS_PROPERTY_NOT_SET = _HRESULT_TYPEDEF_((-2147463163))
E_ADS_PROPERTY_NOT_SUPPORTED = _HRESULT_TYPEDEF_((-2147463162))
E_ADS_PROPERTY_INVALID = _HRESULT_TYPEDEF_((-2147463161))
E_ADS_BAD_PARAMETER = _HRESULT_TYPEDEF_((-2147463160))
E_ADS_OBJECT_UNBOUND = _HRESULT_TYPEDEF_((-2147463159))
E_ADS_PROPERTY_NOT_MODIFIED = _HRESULT_TYPEDEF_((-2147463158))
E_ADS_PROPERTY_MODIFIED = _HRESULT_TYPEDEF_((-2147463157))
E_ADS_CANT_CONVERT_DATATYPE = _HRESULT_TYPEDEF_((-2147463156))
E_ADS_PROPERTY_NOT_FOUND = _HRESULT_TYPEDEF_((-2147463155))
E_ADS_OBJECT_EXISTS = _HRESULT_TYPEDEF_((-2147463154))
E_ADS_SCHEMA_VIOLATION = _HRESULT_TYPEDEF_((-2147463153))
E_ADS_COLUMN_NOT_SET = _HRESULT_TYPEDEF_((-2147463152))
S_ADS_ERRORSOCCURRED = _HRESULT_TYPEDEF_(0x00005011)
S_ADS_NOMORE_ROWS = _HRESULT_TYPEDEF_(0x00005012)
S_ADS_NOMORE_COLUMNS = _HRESULT_TYPEDEF_(0x00005013)
E_ADS_INVALID_FILTER = _HRESULT_TYPEDEF_((-2147463148))
# ADS_DEREFENUM enum
ADS_DEREF_NEVER = 0
ADS_DEREF_SEARCHING = 1
ADS_DEREF_FINDING = 2
ADS_DEREF_ALWAYS = 3
# ADS_PREFERENCES_ENUM
ADSIPROP_ASYNCHRONOUS = 0
ADSIPROP_DEREF_ALIASES = 0x1
ADSIPROP_SIZE_LIMIT = 0x2
ADSIPROP_TIME_LIMIT = 0x3
ADSIPROP_ATTRIBTYPES_ONLY = 0x4
ADSIPROP_SEARCH_SCOPE = 0x5
ADSIPROP_TIMEOUT = 0x6
ADSIPROP_PAGESIZE = 0x7
ADSIPROP_PAGED_TIME_LIMIT = 0x8
ADSIPROP_CHASE_REFERRALS = 0x9
ADSIPROP_SORT_ON = 0xa
ADSIPROP_CACHE_RESULTS = 0xb
ADSIPROP_ADSIFLAG = 0xc
# ADSI_DIALECT_ENUM
ADSI_DIALECT_LDAP = 0
ADSI_DIALECT_SQL = 0x1
# ADS_CHASE_REFERRALS_ENUM; ALWAYS is the union of the two partial modes.
ADS_CHASE_REFERRALS_NEVER = 0
ADS_CHASE_REFERRALS_SUBORDINATE = 0x20
ADS_CHASE_REFERRALS_EXTERNAL = 0x40
ADS_CHASE_REFERRALS_ALWAYS = ADS_CHASE_REFERRALS_SUBORDINATE | ADS_CHASE_REFERRALS_EXTERNAL
# Generated by h2py from ObjSel.h
# Scope, filter and behaviour flags for the directory object-picker (DSOP_*).
DSOP_SCOPE_TYPE_TARGET_COMPUTER = 0x00000001
DSOP_SCOPE_TYPE_UPLEVEL_JOINED_DOMAIN = 0x00000002
DSOP_SCOPE_TYPE_DOWNLEVEL_JOINED_DOMAIN = 0x00000004
DSOP_SCOPE_TYPE_ENTERPRISE_DOMAIN = 0x00000008
DSOP_SCOPE_TYPE_GLOBAL_CATALOG = 0x00000010
DSOP_SCOPE_TYPE_EXTERNAL_UPLEVEL_DOMAIN = 0x00000020
DSOP_SCOPE_TYPE_EXTERNAL_DOWNLEVEL_DOMAIN = 0x00000040
DSOP_SCOPE_TYPE_WORKGROUP = 0x00000080
DSOP_SCOPE_TYPE_USER_ENTERED_UPLEVEL_SCOPE = 0x00000100
DSOP_SCOPE_TYPE_USER_ENTERED_DOWNLEVEL_SCOPE = 0x00000200
DSOP_SCOPE_FLAG_STARTING_SCOPE = 0x00000001
DSOP_SCOPE_FLAG_WANT_PROVIDER_WINNT = 0x00000002
DSOP_SCOPE_FLAG_WANT_PROVIDER_LDAP = 0x00000004
DSOP_SCOPE_FLAG_WANT_PROVIDER_GC = 0x00000008
DSOP_SCOPE_FLAG_WANT_SID_PATH = 0x00000010
DSOP_SCOPE_FLAG_WANT_DOWNLEVEL_BUILTIN_PATH = 0x00000020
DSOP_SCOPE_FLAG_DEFAULT_FILTER_USERS = 0x00000040
DSOP_SCOPE_FLAG_DEFAULT_FILTER_GROUPS = 0x00000080
DSOP_SCOPE_FLAG_DEFAULT_FILTER_COMPUTERS = 0x00000100
DSOP_SCOPE_FLAG_DEFAULT_FILTER_CONTACTS = 0x00000200
DSOP_FILTER_INCLUDE_ADVANCED_VIEW = 0x00000001
DSOP_FILTER_USERS = 0x00000002
DSOP_FILTER_BUILTIN_GROUPS = 0x00000004
DSOP_FILTER_WELL_KNOWN_PRINCIPALS = 0x00000008
DSOP_FILTER_UNIVERSAL_GROUPS_DL = 0x00000010
DSOP_FILTER_UNIVERSAL_GROUPS_SE = 0x00000020
DSOP_FILTER_GLOBAL_GROUPS_DL = 0x00000040
DSOP_FILTER_GLOBAL_GROUPS_SE = 0x00000080
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_DL = 0x00000100
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_SE = 0x00000200
DSOP_FILTER_CONTACTS = 0x00000400
DSOP_FILTER_COMPUTERS = 0x00000800
# Downlevel filter flags; negative literals are 0x8xxxxxxx bitmasks
# expressed as signed 32-bit values (each includes the 0x80000000 bit).
DSOP_DOWNLEVEL_FILTER_USERS = (-2147483647)
DSOP_DOWNLEVEL_FILTER_LOCAL_GROUPS = (-2147483646)
DSOP_DOWNLEVEL_FILTER_GLOBAL_GROUPS = (-2147483644)
DSOP_DOWNLEVEL_FILTER_COMPUTERS = (-2147483640)
DSOP_DOWNLEVEL_FILTER_WORLD = (-2147483632)
DSOP_DOWNLEVEL_FILTER_AUTHENTICATED_USER = (-2147483616)
DSOP_DOWNLEVEL_FILTER_ANONYMOUS = (-2147483584)
DSOP_DOWNLEVEL_FILTER_BATCH = (-2147483520)
DSOP_DOWNLEVEL_FILTER_CREATOR_OWNER = (-2147483392)
DSOP_DOWNLEVEL_FILTER_CREATOR_GROUP = (-2147483136)
DSOP_DOWNLEVEL_FILTER_DIALUP = (-2147482624)
DSOP_DOWNLEVEL_FILTER_INTERACTIVE = (-2147481600)
DSOP_DOWNLEVEL_FILTER_NETWORK = (-2147479552)
DSOP_DOWNLEVEL_FILTER_SERVICE = (-2147475456)
DSOP_DOWNLEVEL_FILTER_SYSTEM = (-2147467264)
DSOP_DOWNLEVEL_FILTER_EXCLUDE_BUILTIN_GROUPS = (-2147450880)
DSOP_DOWNLEVEL_FILTER_TERMINAL_SERVER = (-2147418112)
DSOP_DOWNLEVEL_FILTER_ALL_WELLKNOWN_SIDS = (-2147352576)
DSOP_DOWNLEVEL_FILTER_LOCAL_SERVICE = (-2147221504)
DSOP_DOWNLEVEL_FILTER_NETWORK_SERVICE = (-2146959360)
DSOP_DOWNLEVEL_FILTER_REMOTE_LOGON = (-2146435072)
DSOP_FLAG_MULTISELECT = 0x00000001
DSOP_FLAG_SKIP_TARGET_COMPUTER_DC_CHECK = 0x00000002
# Clipboard format name used to retrieve the picker's selection list.
CFSTR_DSOP_DS_SELECTION_LIST = "CFSTR_DSOP_DS_SELECTION_LIST"
|
hwsyy/scrapy | refs/heads/master | scrapy/commands/__init__.py | 129 | """
Base class for Scrapy commands
"""
import os
from optparse import OptionGroup
from twisted.python import failure
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
class ScrapyCommand(object):
    """Base class for Scrapy commands.

    Subclasses implement run() and typically override syntax() and
    short_desc(); add_options()/process_options() may be extended with
    command-specific options.
    """
    # True when the command may only be used inside a Scrapy project
    requires_project = False
    # crawler process driving the command; attached externally -- presumably
    # by scrapy.cmdline (not created here)
    crawler_process = None
    # default settings to be used for this command instead of global defaults
    default_settings = {}
    # exit status handed back to the shell (0 = success)
    exitcode = 0
    def __init__(self):
        self.settings = None # set in scrapy.cmdline
    def set_crawler(self, crawler):
        """Attach the crawler; asserts it is only attached once."""
        assert not hasattr(self, '_crawler'), "crawler already set"
        self._crawler = crawler
    def syntax(self):
        """
        Command syntax (preferably one-line). Do not include command name.
        """
        return ""
    def short_desc(self):
        """
        A short description of the command
        """
        return ""
    def long_desc(self):
        """A long description of the command. Return short description when not
        available. It cannot contain newlines, since contents will be formatted
        by optparser which removes newlines and wraps text.
        """
        return self.short_desc()
    def help(self):
        """An extensive help for the command. It will be shown when using the
        "help" command. It can contain newlines, since not post-formatting will
        be applied to its contents.
        """
        return self.long_desc()
    def add_options(self, parser):
        """
        Populate option parse with options available for this command
        """
        group = OptionGroup(parser, "Global Options")
        group.add_option("--logfile", metavar="FILE",
            help="log file. if omitted stderr will be used")
        group.add_option("-L", "--loglevel", metavar="LEVEL", default=None,
            help="log level (default: %s)" % self.settings['LOG_LEVEL'])
        group.add_option("--nolog", action="store_true",
            help="disable logging completely")
        group.add_option("--profile", metavar="FILE", default=None,
            help="write python cProfile stats to FILE")
        group.add_option("--lsprof", metavar="FILE", default=None,
            help="write lsprof profiling stats to FILE")
        group.add_option("--pidfile", metavar="FILE",
            help="write process ID to FILE")
        group.add_option("-s", "--set", action="append", default=[], metavar="NAME=VALUE",
            help="set/override setting (may be repeated)")
        group.add_option("--pdb", action="store_true", help="enable pdb on failure")
        parser.add_option_group(group)
    def process_options(self, args, opts):
        """Apply the parsed global options to self.settings (cmdline priority)."""
        try:
            self.settings.setdict(arglist_to_dict(opts.set),
                                  priority='cmdline')
        except ValueError:
            raise UsageError("Invalid -s value, use -s NAME=VALUE", print_help=False)
        if opts.logfile:
            self.settings.set('LOG_ENABLED', True, priority='cmdline')
            self.settings.set('LOG_FILE', opts.logfile, priority='cmdline')
        if opts.loglevel:
            self.settings.set('LOG_ENABLED', True, priority='cmdline')
            self.settings.set('LOG_LEVEL', opts.loglevel, priority='cmdline')
        if opts.nolog:
            self.settings.set('LOG_ENABLED', False, priority='cmdline')
        if opts.pidfile:
            with open(opts.pidfile, "w") as f:
                f.write(str(os.getpid()) + os.linesep)
        if opts.pdb:
            failure.startDebugMode()
    def run(self, args, opts):
        """
        Entry point for running commands
        """
        raise NotImplementedError
|
EventGhost/EventGhost | refs/heads/master | languages/sv_SV.py | 4 | # -*- coding: UTF-8 -*-
class General:
    """Swedish (sv_SV) translations for general-purpose UI strings."""
    apply = u"Verkställ"
    autostartItem = u"Autostart"
    browse = u"Bläddra..."
    cancel = u"Avbryt"
    choose = u"Välj"
    configTree = u"Konfigurationsträd"
    deleteLinkedItems = u"Minst ett objekt utanför din markering refererar till ett objekt i din markering. Om du fortsätter att ta bort markeringen, kommer inte det refererande objektet att fungera längre.\n\nÄr du säker att du vill ta bort markeringen?"
    deleteManyQuestion = u"Detta segment har %s subsegment.\nÄr du säker att du vill ta bort alla?"
    deletePlugin = u"Detta plugin används av en eller flera actions i din onfguraton.\nDu ksn inte ts bort det innan alla actions som användet detta plugin har tagits bort."
    deleteQuestion = u"Är du säker att du vill ta bort detta objekt"
    help = u"&Hjälp"
    noOptionsAction = u"Denna åtgärd har inga inställningar."
    noOptionsPlugin = u"Detta plugin har inga inställningar."
    ok = u"OK"
    pluginLabel = u"Plugin: %s"
    unnamedEvent = u"<icke namngiven händelse>"
    unnamedFile = u"<icke namngiven fil>"
    unnamedFolder = u"<icke namngiven katalog>"
    unnamedMacro = u"<icke namngivet makro>"
class MainFrame:
    """Swedish translations for the EventGhost main window (log, menus, tree)."""
    onlyLogAssigned = u"&Logga endast tilldelade och aktiverade händelser"
    class Logger:
        caption = u"Log"
        descriptionHeader = u"Beskrivning"
        timeHeader = u"Tid"
        welcomeText = u"---> Välkommen till EventGhost <---"
    class Menu:
        About = u"&Om EventGhost"
        AddPlugin = u"Lägg till Plugin"
        Apply = u"Verkställ"
        CheckUpdate = u"Sök efter uppdateringar nu..."
        ClearLog = u"Rensa Log"
        Close = u"&Stäng"
        CollapseAll = u"&Fäll ihop alla"
        ConfigurationMenu = u"&Konfiguration"
        Copy = u"&Kopiera"
        Cut = u"K&lipp ut"
        Delete = u"&Ta bort"
        Disabled = u"Inaktivera objekt"
        Configure = u"Konfigurera objekt"
        EditMenu = u"&Redigera"
        Execute = u"Exekvera objekt"
        Exit = u"&Avsluta"
        ExpandAll = u"&Expandera alla"
        ExpandOnEvents = u"Markera automatiskt vid händelse"
        ExpandTillMacro = u"Expandera automatskt endast till makro"
        Export = u"Exportera..."
        FileMenu = u"&Arkiv"
        Find = u"&Sök..."
        FindNext = u"Sök &Nästa"
        HelpMenu = u"&Hjälp"
        HideShowToolbar = u"Verktygsfält"
        Import = u"Importera..."
        LogActions = u"Logga händelser"
        LogMacros = u"Logga Makron"
        LogTime = u"Logga tid"
        New = u"&Ny"
        AddAction = u"Lägg till åtgärd"
        AddEvent = u"Lägg till händelse"
        AddFolder = u"Lägg till Katalog"
        AddMacro = u"Lägg till Makro"
        Open = u"&Öppna..."
        Options = u"&Inställningar"
        Paste = u"&Klistra in"
        Redo = u"&Upprepa"
        Rename = u"Byt namn"
        Reset = u"Reset"
        Save = u"&Spara"
        SaveAs = u"&Spara som..."
        SelectAll = u"Välj &alla"
        Undo = u"&Ångra"
        ViewMenu = u"Visa"
        WebForum = u"Support forum"
        WebHomepage = u"Hemsida"
        WebWiki = u"Wikipedia"
    class SaveChanges:
        mesg = u"Filen har ändrats\n\nVill du spara ändringarna?"
        title = u"Spara ändringar?"
    class TaskBarMenu:
        Exit = u"Avsluta"
        Hide = u"Göm EventGhost"
        Show = u"Visa EventGhost"
    class Tree:
        caption = u"Konfiguration"
class Error:
    """Swedish translations for error messages."""
    FileNotFound = u'Det går inte att hitta filen "%s"'
    InAction = u'Fel i åtgärd: "%s"'
    pluginLoadError = u"Fel vid laddning av plugin %s."
    pluginNotActivated = u'Plugin "%s" är inte aktiverat'
    pluginStartError = u"Fel vid start av plugin: %s"
class CheckUpdate:
    """Swedish translations for the update-check dialogs."""
    ManErrorMesg = u"Det gick inte att hämta information från EventGhosts hemsida\n\nVänligen försök igen senare."
    ManErrorTitle = u"Fel vid sökning efter uppdatering"
    ManOkMesg = u"Detta är den senaste versionen av EventGhost."
    ManOkTitle = u"Det finns ingen senare version"
    downloadButton = u"Besök nerladdningssidan"
    newVersionMesg = u"En senare version av EventGhost finns tillgänglig\n\n Din version: %s\n Senaste versionen %s\n\nVill du besöka nerladdningssidan nu?"
    title = u"Ny EventGhost-version finns tillgänglig..."
    waitMesg = u"Vänligen vänta medan EventGhost mottager uppdateringsinformation."
class AddActionDialog:
    """Swedish translations for the "add action" dialog."""
    descriptionLabel = u"Beskrivning"
    title = u"Välj en åtgärd att lägga till..."
class AddPluginDialog:
    """Swedish translations for the "add plugin" dialog."""
    author = u"Författare:"
    descriptionBox = u"Beskrivning"
    externalPlugins = u"Extern utrustning"
    noInfo = u"Det finns ingen information tillgänglig."
    noMultiload = u"Detta plugin stödjer inte flera instaser. Du har redan en instans av detta plugin i din konfiguration."
    noMultiloadTitle = u"Flera instanser inte möjliga"
    otherPlugins = u"Övrigt"
    programPlugins = u"Programkontroll"
    remotePlugins = u"Mottagare"
    title = u"Välj ett plugin att lägga till..."
    version = u"Version:"
class OptionsDialog:
    """Swedish translations for the program options dialog."""
    CheckUpdate = u"Sök efter senare versioner vid programstart"
    HideOnClose = u"Göm huvudfönstret om stängboxen trycks."
    HideOnStartup = u"Göm vid programstart"
    LanguageGroup = u"Språk"
    StartGroup = u"Vid programstart"
    StartWithWindows = u"Starta vid Windows-uppstart"
    Tab1 = u"Allmänt"
    Title = u"Inställningar"
    UseAutoloadFile = u"Autoladda fil"
    Warning = u"Språkändring kräver omstart för att verkställas."
    confirmDelete = u"Bekräfta borttagning av träd-objekt"
    limitMemory1 = u"Begränsa minnesallokeringen medans minimerad till"
    limitMemory2 = u"MB"
class FindDialog:
    """Swedish translations for the find/search dialog."""
    caseSensitive = u"&Matcha gemener/VERSALER"
    direction = u"Riktning"
    down = u"&Ner"
    findButton = u"&Sök Nästa"
    notFoundMesg = u'"%s" kunde inte hittas.'
    searchLabel = u"&Hitta:"
    searchParameters = u"Sök även åtgärdsparametrar"
    title = u"Sök"
    up = u"&Upp"
    wholeWordsOnly = u"&Matcha endast hela ord"
class AboutDialog:
    """Swedish translations for the "about" dialog."""
    Author = u"Författare: %s"
    CreationDate = u"%a, %d %b %Y %H:%M:%S"
    Title = u"Om EventGhost"
    Version = u"Version: %s (build %s)"
    tabAbout = u"Om"
    tabLicense = u"Licensavtal"
    tabSpecialThanks = u"Speciellt tack"
    tabSystemInfo = u"Systeminformation"
class Plugin:
class EventGhost:
name = u"EventGhost"
description = u"Här hittar du åtgärder som kontrollerar grundläggande funktioner i EventGhost"
class AutoRepeat:
name = u"Repetera automatiskt aktuellt makro"
description = u"Gör det makro där detta kommando läggs till till ett automatisk repeterande makro."
seconds = u"sekunder"
text1 = u"Starta första repetitionen efter"
text2 = u"med en repetition varje"
text3 = u"Öka repetitionen nästa"
text4 = u"till en repetition varje"
class Comment:
name = u"Kommentar"
description = u"En action som inte gör någonting, som kan användas för att kommentera din konfiguration"
class DisableItem:
name = u"Avaktiverar en post"
description = u"Avaktiverar en post"
label = u"Avaktivera: %s"
text1 = u"Välj den post som ska avaktiveras:"
class EnableExclusive:
name = u"Exklusivt aktivera en katalog/ett makro"
description = u"Aktivera specificerad katalog eller makro i din konfiguration, men avaktivera alla andra kataloger och makron som är syskon på samma nivå i denna del-gren i trädet."
label = u"Aktivera exklusivitet: %s"
text1 = u"Välj den katalog/makro som ska aktiveras:"
class EnableItem:
name = u"Aktivera en enhet"
description = u"Aktiverar en enhet i trädet"
label = u"Aktivera: %s"
text1 = u"Välj den enhet som ska aktiveras:"
class FlushEvents:
name = u"Töm händelser"
description = u'"Töm händelserna" tömmer alla händelser som för tillfället finns i åtgärdskön. Det är användbart om ett makro som tar lång tid på sig, och åtgärder har köats.\n\n<p><b>Exampel:</b> Du har ett uppstartsmakro som tar lång tid, låt säga 90 sekunder. Användaren ser ingenting förrän projektorn startas, vilket tar 60 sekunder. Det är hög sannolikhet att han/hon trycker på en fjärrkontroll som startar ett makro flera gånger efter varann, vilket orsakar att den långa åtgärden kör flera gånger. Om du då placerar en "Töm händelser" kommando i slutet av ditt makro, kommer alla överflödiga knapptryck att ignoreras.\n'
class JumpIf:
name = u"Hoppa om"
description = u"Hoppar till ett annat makro, om det specificerade python-uttrycket returneras sant."
label1 = u"Om %s gå till %s"
label2 = u"Om %s gosub %s"
mesg1 = u"Välj makrot..."
mesg2 = u"Välj det makro som ska startas om villkoret är sant."
text1 = u"Om:"
text2 = u"Gå till:"
text3 = u"återvänd efter körning"
class JumpIfLongPress:
name = u"Hoppa vid långt tryck"
description = u"Hoppar till ett annat makro, om en knapp på en fjärrkontroll trycks ner längre än den konfigurerade tiden."
label = u"Om knappen är nedtryckt %s sec, gå till: %s"
text1 = u"Om knappen är nedtryckt längre än"
text2 = u"sekunder,"
text3 = u"hoppa till:"
text4 = u"Välj makro..."
text5 = u"Välj det makro som ska triggas om när det kommer ett långt tryck."
class NewJumpIf:
name = u"Hoppa"
description = u"Hoppar till ett annat makro, om det specificerade villkoret uppfylls."
choices = [
u"senaste åtgärden lyckades",
u"senaste åtgärden misslyckades",
u"alltid",
]
labels = [
u'Om villkoret uppfylls, hoppa till "%s"',
u'Om villkoret inte uppfylls, hoppa till "%s"',
u'Hoppa till "%s"',
u'Om villkoret uppfylls, hoppa till "%s" och återvänd',
u'Om villkoret inte uppfylls, hoppa till "%s" och återvänd',
u'Hoppa till "%s" och återvänd',
]
mesg1 = u"Välj makro..."
mesg2 = u"Välj det makro som ska köras, om villkoret uppfylls."
text1 = u"Om:"
text2 = u"Hoppa till:"
text3 = u"och återvänd efter körning"
class PythonCommand:
name = u"Python-uttryck"
description = u"Kör ett enstaka Python-uttryck"
parameterDescription = u"Python-uttryck:"
class PythonScript:
name = u"Python-skript"
description = u"Python-skript"
class ShowOSD:
name = u"Visa OSD"
description = u"Visar en enkel på skärmen visning."
alignment = u"Placering:"
alignmentChoices = [
u"Uppe till vänster",
u"Uppe till höger",
u"Nere till vänster",
u"Nere till höger",
u"Mitten av skärmen",
u"Nere i mitten",
u"Uppe i mitten",
u"Till vänster i mitten",
u"Till höger i mitten",
]
display = u"Visa på skärm:"
editText = u"Text som ska visas:"
label = u"Visa OSD: %s"
osdColour = u"Färg:"
osdFont = u"Teckensnitt"
outlineFont = u"Bakgrundsfärg"
wait1 = u"Göm efter"
wait2 = u"sekunder (0 = aldrig)"
xOffset = u"Horisontell offset X:"
yOffset = u"Vertikal offset Y:"
class StopIf:
name = u"Stoppa om"
description = u"Stoppar körningen av det aktuella makrot, om det speciella Python-villkoret uppfylls."
label = u"Stoppa om %s"
parameterDescription = u"Python-villkor:"
class StopProcessing:
name = u"Stoppa detta event"
description = u"Stoppa detta event"
class TriggerEvent:
name = u"Trigga Event"
description = u"Genererar ett event"
labelWithTime = u'Trigga event "%s" efter %.2f sekunder'
labelWithoutTime = u'Trigga event "%s"'
text1 = u"Sträng som ska skickas:"
text2 = u"Fördröjning:"
text3 = u"sekuder. (0 = omedelbart)"
class Wait:
name = u"Vänta ett tag"
description = u"Vänta ett tag"
label = u"Vänta: %s sek"
seconds = u"sekunder"
wait = u"Vänta"
class System:
name = u"System"
description = u"Kontrollerar olika delar av systemet, så som ljudkortet, grafikkortet etc."
forced = u"Tvingad: %s"
forcedCB = u"Tvinga stänga alla program"
class ChangeDisplaySettings:
name = u"Ändra skärminställningar"
description = u"Ändra skärminställningar"
class ChangeMasterVolumeBy:
name = u"Ändra huvudvolymen"
description = u"Ändra huvudvolymen"
text1 = u"Ändra huvudvolymen med"
text2 = u"procent."
class Execute:
name = u"Starta applikation"
description = u"Startar en körbar fil."
FilePath = u"Sökväg till filen:"
Parameters = u"kommandorads inställningar:"
ProcessOptions = (
u"Realtid",
u"Mer än normalt",
u"Normalt",
u"Mindre än normalt",
u"Overksam",
)
ProcessOptionsDesc = u"Processprioritet:"
WaitCheckbox = u"Vänta tills applikationen är avslutad innan fortsättning"
WindowOptions = (
u"Normal",
u"Minimerad",
u"Maximerad",
u"Dold",
)
WindowOptionsDesc = u"Fönsterinställningar"
WorkingDir = u"Arbetsmapp:"
browseExecutableDialogTitle = u"Välj program"
browseWorkingDirDialogTitle = u"Välj arbetsmapp"
label = u"Starta Programmet: %s"
class Hibernate:
name = u"Sätt daton i viloläge"
description = u"Denna funktionen sätter datorn i viloläge"
class LockWorkstation:
name = u"Lås datorn"
description = u"Denna funktionen låser datorn, samma som att trycka Ctrl+Alt+Del och trycka lås dator."
class LogOff:
name = u"Logga ur aktuell användare"
description = u"Stänger ner alla processer och loggar ur aktuell användare"
class MonitorGroup:
name = u"Skärm"
description = u"Dessa åtgärder kontrollerar skärmen."
class MonitorPowerOff:
name = u"Stäng skärmen"
description = u"Sätter skärmen i strömspar-läge"
class MonitorPowerOn:
name = u"Sätt på skärmen"
description = u"Sätter på skärmen från strömspar-läge, stänger även av skärmsläckare."
class MonitorStandby:
name = u"Sätt skärmen i stand-by-läge"
description = u"Sätt skärmen i stand-by-läge"
class MuteOff:
name = u"Sätt på ljudet"
description = u"Sätter på ljudet"
class MuteOn:
name = u"Stäng av ljudet"
description = u"Stänger av ljudet"
class OpenDriveTray:
name = u"Öppna/Stäng CD/DVD-enheter"
description = u"Öppnar eller stänger luckan på CD/DVD-enheter."
driveLabel = u"Enhet:"
labels = [
u"Toggla luckan på enhet: %s",
u"Öppna luckan på enhet: %s",
u"Stäng luckan på enhet: %s",
]
options = [
u"Ändrar mellan öppen och stängd lucka",
u"Öppnar luckan",
u"Stänger luckan",
]
optionsLabel = u"Välj händelse"
class PlaySound:
name = u"Spela ljud"
description = u"Spelar upp ett ljud"
fileMask = u"Wav-filer (*.WAV)|*.wav|Alla filer (*.*)|*.*"
text1 = u"Sökväg till ljudfilen:"
text2 = u"Vänta tills slutet"
class PowerDown:
name = u"Stäng av datorn"
description = u"Stänger av datorn."
class PowerGroup:
name = u"Strömhantering"
description = u"Dessa åtgärder stänger av, startar om eller försätter datorn i viloläge. Det går också att låsa datorn samt logga ut användare."
class Reboot:
name = u"Starta om"
description = u"Starta om datorn"
class RegistryChange:
name = u"Ändra i registret"
description = u"Ändra värden i windows-registret."
actions = (
u"Skapa eller ändra",
u"Ändra om redan finns",
u"Ta bort",
)
labels = (
u'Ändra "%s" till "%s"',
u'Ändra "%s" till "%s" om det redan finns ett värde',
u'Ta bort "%s"',
)
class RegistryGroup:
name = u"Registret"
description = u"Frågar eller ändrar värden i windows-registret."
actionText = u"Åtgärd:"
chooseText = u"Välj registernyckel:"
defaultText = u"(Standard)"
keyOpenError = u"Fel vid öppning av registernyckel"
keyText = u"Nyckel:"
keyText2 = u"Nyckel"
newValue = u"Nytt värde:"
noKeyError = u"Ingen nyckel angiven"
noNewValueError = u"Inget värde angivet"
noSubkeyError = u"Ingen undernyckel angiven"
noTypeError = u"Ingen typ angiven "
noValueNameError = u"Inget värde angivet"
noValueText = u"Värdet hittades inte"
oldType = u"Nuvarande typ:"
oldValue = u"Nuvarande värde:"
typeText = u"Typ:"
valueChangeError = u"Fel vid ändring av värde"
valueName = u"Värdenamn:"
valueText = u"Värde:"
class RegistryQuery:
name = u"Fråga registret"
description = u"Fråga registret och få tillbaka värdet"
actions = (
u"kontrollera om det finns",
u"returnera resultat",
u"jämför med",
)
labels = (
u'Kontrollera om "%s" finns',
u'Returnera "%s"',
u'Jämför "%s" med %s',
)
class ResetIdleTimer:
name = u"Nollställ Idle-timern"
description = u"Nollställ Idle-timern"
class SetClipboard:
name = u"Kopiera sträng till utklipp"
description = u"Kopierar sträng till utklipp"
error = u"Kan inte öppna utklipp"
class SetDisplayPreset:
name = u"Ställ primär bildskärm"
description = u"Ställ primär bildskärm"
fields = (
u"Enehet",
u"Vänster",
u"Topp",
u"Bredd",
u"Höjd",
u"Frekvens",
u"Färgdjup",
u"Bifogad",
u"Primär",
u"Flaggor",
)
query = u"Nuvarande bildskärmsinställningar"
class SetIdleTime:
name = u"Ställ Idle-tid"
description = u"Ställ Idle-tid"
label1 = u"Vänta"
label2 = u"sekunder innan idle triggas"
class SetMasterVolume:
name = u"Ställ huvudvolymen"
description = u"Ställ huvudvolymen"
text1 = u"Ställ huvudvolymen till"
text2 = u"procent."
class SetSystemIdleTimer:
name = u"Ställ systemets Idle-timer"
description = u"Ställ systemets Idle-timer"
choices = [
u"Avaktivera systemets Idle-timer",
u"Avaktivera systemets Idle-timer",
]
text = u"Välj alternativ:"
class SetWallpaper:
name = u"Ändra bakgrundsbild"
description = u"Ändra bakgrundsbild"
choices = (
u"Centrerad",
u"Sida vid sida",
u"Anpassad",
)
fileMask = u"Alla bild-filer|*.jpg;*.bmp;*.gif|All Files (*.*)|*.*"
text1 = u"Sökväg till bilden:"
text2 = u"Placering:"
class ShowPicture:
name = u"Visa bild"
description = u"Visar en bild"
allFiles = u"Alla filer"
allImageFiles = u"Alla bild-filer"
display = u"Skärm"
path = u"Sökväg till bilden"
class SoundGroup:
name = u"Ljudkort"
description = u"Kontrollera inställningarna för ljudkortet"
class Standby:
name = u"Sätt datorn i stand-by"
description = u"Sätt datorn i stand-by"
class StartScreenSaver:
name = u"Starta skärmsläckaren"
description = u"Startar skärmsläckaren."
class ToggleMute:
name = u"Ändra Mute"
description = u"Ändra Mute"
class WakeOnLan:
name = u"Wake on LAN"
description = u"Starta en dator genom Wake on LAN (WOL)"
parameterDescription = u"MAC-adress som ska väckas:"
class Window:
name = u"Fönster"
description = u"Åtgärder som kan kontrollera fönster, så som att hitta ett specifikt fönster, flytta, ändra storlek och skicka knapptryckningar."
class BringToFront:
name = u"Visa överst"
description = u"Lägger det specificeraade fönstret överst"
class Close:
name = u"Stäng"
description = u"Stänger ett fönster"
class FindWindow:
name = u"Hitta ett fönster"
description = u'Letar efter ett fönster, som senare kan användas för andra fönsteråtgärder i makrot.\n\n<p>Om ett makro inte har "Hitta ett fönster" åtgärder, kommer alla fönsteråtgärder påverka det fönster som har fokus.'
drag1 = u"Drag mig till\nett fönster."
drag2 = u"Flytta mig nu\ntill ett fönster."
hide_box = u"Göm EventGhost under dragning"
invisible_box = u"Leta även efter osynliga fönster"
label = u"Hitta fönster: %s"
label2 = u"Hitta det främsta fönstret"
matchNum1 = u"Skicka endast tillbaka"
matchNum2 = u":e träff"
onlyFrontmost = u"Matcha endast det främsta fönstret"
options = (
u"Program:",
u"Fönsternamn:",
u"Fönsterklass:",
u"Namn på underfönster:",
u"Underklass:",
)
refresh_btn = u"&Uppdatera"
stopMacro = [
u"Stoppa makro om målet inte hittas",
u"Stoppa makro om målet hittas",
u"Stoppa aldrig makrot",
]
testButton = u"Testa"
wait1 = u"Vänta upp till"
wait2 = u"sekunder för att fönstret visas"
class Maximize:
name = u"Maximera"
description = u"Maximera"
class Minimize:
name = u"Minimera"
description = u"Minimera"
class MoveTo:
name = u"Absolut flyttning"
description = u"Absolut flyttning"
label = u"Flytta fönster till %s"
text1 = u"Ställ horisontell position X till"
text2 = u"pixlar"
text3 = u"Ställ vertikal position Y till"
text4 = u"pixlar"
class Resize:
name = u"Ändra storlek"
description = u"Ändrar ett fönsters storlek till specificerad storlek."
label = u"Ändra storlek till %s, %s"
text1 = u"Sätt bredd till"
text2 = u"pixlar"
text3 = u"Sätt höjd till"
text4 = u"pixlar"
class Restore:
name = u"Återskapa"
description = u"Återskapa"
class SendKeys:
name = u"Emulera knapptryck"
description = u'Denna åtgärd emulerar knapptryckningar för att kontrollera andra program.\nSkriv bara in den text du vill i textrutan\n\n<p>\nFör att emulera specialknappar, måste du innesluta ett nyckelord inom måsvingar "{ }"\nTill exempel om du vill knappkombinationen Ctrl och V skriver du <b>{Ctrl+V}</b>\nDet går att komibnera fler knapptryckningar så som: <b>{Shift+Ctrl+F1}</b>\n<p>\nVissa tangenter skiljer mellan vänster och höger sida av tangentbordet, så kan dom börja\nmed ett "L" eller ett "R", så som Windows-tangenten:\n<b>{Win}</b> or <b>{LWin}</b> or <b>{RWin}</b>\n<p>\nHär följer en lista på andra nyckelord som EventGhost kan hantera:\n<br>\n<b>{Ctrl}</b> eller <b>{Control}<br>\n{Shift}<br>\n{Alt}<br>\n{Return}</b> eller <b>{Enter}<br>\n{Back}</b> eller <b>{Backspace}<br>\n{Tab}</b> eller <b>{Tabulator}<br>\n{Esc}</b> eller <b>{Escape}<br>\n{Spc}</b> eller <b>{Space}<br>\n{Up}<br>\n{Down}<br>\n{Left}<br>\n{Right}<br>\n{PgUp}</b> eller <b>{PageUp}<br>\n{PgDown}</b> eller <b>{PageDown}<br>\n{Home}<br>\n{End}<br>\n{Ins}</b> eller <b>{Insert}<br>\n{Del}</b> eller <b>{Delete}<br>\n{Pause}<br>\n{Capslock}<br>\n{Numlock}<br>\n{Scrolllock}<br>\n{F1}, {F2}, ... , {F24}<br>\n{Apps}</b> (Detta är menyknappen som sitter brevid den högra windows-tangenten)<b><br>\n<br>\n</b>Detta är knapparna på det numeriska tangentbordet:<b><br>\n{Divide}<br>\n{Multiply}<br>\n{Subtract}<br>\n{Add}<br>\n{Decimal}<br>\n{Numpad0}, {Numpad1}, ... , {Numpad9}</b>\n'
insertButton = u"&Lägg in"
specialKeyTool = u"Specialknapps verktyg"
textToType = u"Text som ska skickas:"
useAlternativeMethod = u"Använd alternativ metod för att emulera knapptryck"
class Keys:
backspace = u"Sudda"
context = u"Menyknapp"
delete = u"Delete"
down = u"Ner"
end = u"End"
enter = u"Enter"
escape = u"Escape"
home = u"Home"
insert = u"Insert"
left = u"Vänster"
num0 = u"Numeriskt tangentbord 0"
num1 = u"Numeriskt tangentbord 1"
num2 = u"Numeriskt tangentbord 2"
num3 = u"Numeriskt tangentbord 3"
num4 = u"Numeriskt tangentbord 4"
num5 = u"Numeriskt tangentbord 5"
num6 = u"Numeriskt tangentbord 6"
num7 = u"Numeriskt tangentbord 7"
num8 = u"Numeriskt tangentbord 8"
num9 = u"Numeriskt tangentbord 9"
numAdd = u"Numeriskt tangentbord +"
numDecimal = u"Numeriskt tangentbord ,"
numDivide = u"Numeriskt tangentbord /"
numMultiply = u"Numeriskt tangentbord *"
numSubtract = u"Numeriskt tangentbord -"
pageDown = u"Ner"
pageUp = u"Upp"
returnKey = u"Return"
right = u"Höger"
space = u"Mellanslag"
tabulator = u"Tab"
up = u"Upp"
win = u"Windows-tangenten"
class SendMessage:
name = u"Skicka meddelande"
description = u"Använder Windows-api:et SendMessage för att skicka ett specifikt meddelande till ett fönster. Det går också att använda PostMessage om så önskas."
text1 = u"Använd PostMessage istället för SendMessage"
class SetAlwaysOnTop:
name = u"Sätt alltid överst"
description = u"Sätt alltid överst"
actions = (
u"Ta bort alltid överst",
u"Sätt alltid överst",
u"Toggla alltid överst",
)
radioBox = u"Välj händelse:"
class Mouse:
name = u"Mus"
description = u"Åtgärder som kontrollerar muspekaren."
class GoDirection:
name = u"Flytta musen åt ett håll"
description = u"Flytta musen åt ett håll"
label = u"Flytta musen åt %.2f°"
text1 = u"Riktning som muspekaren ska flyttas "
text2 = u"(0-360)"
class LeftButton:
name = u"Vänster musknapp"
description = u"Vänster musknapp"
class LeftDoubleClick:
name = u"Vänster musknapp dubbelklick"
description = u"Vänster musknapp dubbelklick"
class MiddleButton:
name = u"Mitten musknapp"
description = u"Mitten musknapp"
class MouseWheel:
name = u"Snurra scrollhjulet"
description = u"Snurra scrollhjulet"
label = u"Snurra scrollhjulet %d steg"
text1 = u"Snurra scrollhjulet med"
text2 = u"steg. (Negativt värde snurrar neråt)"
class MoveAbsolute:
name = u"Absolut flyttning"
description = u"Absolut flyttning"
label = u"Flytta muspekaren till x:%s, y:%s"
text1 = u"Sätt horisontell position X till"
text2 = u"pixlar"
text3 = u"Sätt vertikal position Y till"
text4 = u"pixlar"
class RightButton:
name = u"Höger musknapp"
description = u"Höger musknapp"
class RightDoubleClick:
name = u"Höger musknapp dubbelklick"
description = u"Höger musknapp dubbelklick"
class ToggleLeftButton:
name = u"Toggla vänster musknapp"
description = u"Toggla vänster musknapp"
class Joystick:
name = u"Joystick"
description = u"Använd joystick eller gamepad som in-enhet till EventGhost."
class Keyboard:
name = u"Tangentbord"
description = u"Detta plugin genererar händelser vid knapptryckningar (Hotkeys)"
class MediaPlayerClassic:
name = u"Media Player Classic"
description = u'Kontrollera <a href="http://sourceforge.net/projects/guliverkli/">Media Player Classic</a>.\n\n<p>Endast för version <b>6.4.8.9</b> eller senare. Pluginet fungerar inte med äldre versioner!>/p>\n<p><a href=http://www.eventghost.net/forum/viewtopic.php?t=17>Bugrapporter</a></p>\n<p><a href="http://sourceforge.net/projects/guliverkli/">Media Player Classic SourceForge Projekt</a></p>'
class AlwaysOnTop:
name = u"Alltid överst"
description = u"Alltid överst"
class AudioDelayAdd10ms:
name = u"Fördröj ljudet +10ms"
description = u"Fördröj ljudet +10ms"
class AudioDelaySub10ms:
name = u"Fördröj ljudet -10ms"
description = u"Fördröj ljudet -10ms"
class Close:
name = u"Stäng fil"
description = u"Stäng fil"
class DVDAngleMenu:
name = u"DVD vinkelmeny"
description = u"DVD vinkelmeny"
class DVDAudioMenu:
name = u"DVD Ljudmeny"
description = u"DVD Ljudmeny"
class DVDChapterMenu:
name = u"DVD kapitelmeny"
description = u"DVD kapitelmeny"
class DVDMenuActivate:
name = u"DVD meny aktivera"
description = u"DVD meny aktivera"
class DVDMenuBack:
name = u"DVD meny tillbaka"
description = u"DVD meny tillbaka"
class DVDMenuDown:
name = u"DVD meny ner"
description = u"DVD meny ner"
class DVDMenuLeave:
name = u"DVD meny lämna"
description = u"DVD meny lämna"
class DVDMenuLeft:
name = u"DVD meny vänster"
description = u"DVD meny vänster"
class DVDMenuRight:
name = u"DVD meny höger"
description = u"DVD meny höger"
class DVDMenuUp:
name = u"DVD meny upp"
description = u"DVD meny upp"
class DVDNextAngle:
name = u"DVD nästa vinkel"
description = u"DVD nästa vinkel"
class DVDNextAudio:
name = u"DVD nästa ljud"
description = u"DVD nästa ljud"
class DVDNextSubtitle:
name = u"DVD nästa undertext"
description = u"DVD nästa undertext"
class DVDOnOffSubtitle:
name = u"DVD av/på undertext"
description = u"DVD av/på undertext"
class DVDPrevAngle:
name = u"DVD föregående vinkel"
description = u"DVD föregående vinkel"
class DVDPrevAudio:
name = u"DVD föregående ljud"
description = u"DVD föregående ljud"
class DVDPrevSubtitle:
name = u"DVD föregående undertext"
description = u"DVD föregående undertext"
class DVDRootMenu:
name = u"DVD rotmeny"
description = u"DVD rotmeny"
class DVDSubtitleMenu:
name = u"DVD undertextmeny"
description = u"DVD undertextmeny"
class DVDTitleMenu:
name = u"DVD titelmeny"
description = u"DVD titelmeny"
class DecreaseRate:
name = u"Minska hastighet"
description = u"Minska hastighet"
class Exit:
name = u"Avsluta"
description = u"Avsluta applikation"
class FiltersMenu:
name = u"Filtermeny"
description = u"Filtermeny"
class FrameStep:
name = u"Stega bild"
description = u"Stega bild"
class FrameStepBack:
name = u"Stega bild bakåt"
description = u"Stega bild bakåt"
class Fullscreen:
name = u"Fullskärm"
description = u"Fullskärm"
class FullscreenWOR:
name = u"Fullskärm utan att ändra upplösning"
description = u"Fullskärm utan att ändra upplösning"
class GoTo:
name = u"Gå till "
description = u"Gå till"
class IncreaseRate:
name = u"Öka hastighet"
description = u"Öka hastighet"
class JumpBackwardKeyframe:
name = u"Hoppa bakåt nyckelruta"
description = u"Hoppa bakåt nyckelruta"
class JumpBackwardLarge:
name = u"Hoppa långt bakåt"
description = u"Hoppa långt bakåt"
class JumpBackwardMedium:
name = u"Hoppa bakåt medium"
description = u"Hoppa bakåt medium"
class JumpBackwardSmall:
name = u"Hoppa lite bakåt"
description = u"Hoppa lite bakåt"
class JumpForwardKeyframe:
name = u"Hoppa framåt nyckelruta"
description = u"Hoppa framåt nyckelruta"
class JumpForwardLarge:
name = u"Hoppa långt framåt"
description = u"Hoppa långt framåt"
class JumpForwardMedium:
name = u"Hoppa framåt medium"
description = u"Hoppa framåt medium"
class JumpForwardSmall:
name = u"Hoppa lite framåt"
description = u"Hoppa lite framåt"
class LoadSubTitle:
name = u"Ladda undertext"
description = u"Ladda undertext"
class Next:
name = u"Nästa"
description = u"Nästa"
class NextAudio:
name = u"Nästa ljud"
description = u"Nästa ljud"
class NextAudioOGM:
name = u"Nästa OGM ljud"
description = u"Nästa OGM ljud"
class NextPlaylistItem:
name = u"Nästa i playlisten"
description = u"Nästa i playlisten"
class NextSubtitle:
name = u"Nästa undertext"
description = u"Nästa undertext"
class NextSubtitleOGM:
name = u"Nästa OGM undertext"
description = u"Nästa OGM undertext"
class OnOffSubtitle:
name = u"Av/på undertext"
description = u"Av/på undertext"
class OpenDVD:
name = u"Öppna DVD"
description = u"Öppna DVD"
class OpenDevice:
name = u"Öppna enhet"
description = u"Öppna enhet"
class OpenFile:
name = u"Öppna fil"
description = u"Öppna fil"
class Options:
name = u"Inställningar"
description = u"Inställningar"
class Pause:
name = u"Pausa"
description = u"Pausa"
class Play:
name = u"Play"
description = u"Play"
class PlayPause:
name = u"Play/Paus"
description = u"Play/Paus"
class PrevAudio:
name = u"Föregående ljud"
description = u"Föregående ljud"
class PrevAudioOGM:
name = u"Föregående OGM ljud"
description = u"Föregående OGM ljud"
class PrevSubtitle:
name = u"Föregående undertext"
description = u"Föregående undertext"
class PrevSubtitleOGM:
name = u"Föregående OGM undertext"
description = u"Föregående OGM undertext"
class Previous:
name = u"Föregående"
description = u"Föregående"
class PreviousPlaylistItem:
name = u"Föregående i playlisten"
description = u"Föregående i playlisten"
class Properties:
name = u"Egenskaper"
description = u"Egenskaper"
class QuickOpen:
name = u"Snabbt öppna fil"
description = u"Snabbt öppna fil"
class ReloadSubtitles:
name = u"Ladda om undertexter"
description = u"Ladda om undertexter"
class ResetRate:
name = u"Återställ hastighet"
description = u"Återställ hastighet"
class SaveAs:
name = u"Spara som"
description = u"Spara som"
class SaveImage:
name = u"Spara bild"
description = u"Spara bild"
class SaveImageAuto:
name = u"Spara bild automatiskt"
description = u"Spara bild automatiskt"
class SaveSubtitle:
name = u"Spara undertext"
description = u"Spara undertext"
class Stop:
name = u"Stopp"
description = u"Stopp"
class ToggleControls:
name = u"Toggla kontroller"
description = u"Toggla kontroller"
class ToggleInformation:
name = u"Toggla information"
description = u"Toggla information"
class TogglePlaylistBar:
name = u"Toggla playlistrutan"
description = u"Toggla playlistrutan"
class ToggleSeeker:
name = u"Toggla sökaren"
description = u"Toggla sökaren"
class ToggleStatistics:
name = u"Toggla statistik"
description = u"Toggla statistik"
class ToggleStatus:
name = u"Toggla status"
description = u"Toggla status"
class ViewCompact:
name = u"Visa kompakt"
description = u"Visa kompakt"
class ViewMinimal:
name = u"Visa minimal"
description = u"Visa minimal"
class ViewNormal:
name = u"Visa normal"
description = u"Visa normal"
class VolumeDown:
name = u"Sänk volymen"
description = u"Sänk volymen"
class VolumeMute:
name = u"Tyst"
description = u"Tyst"
class VolumeUp:
name = u"Öka volymen"
description = u"Öka volymen"
class Zoom100:
name = u"Zooma 100%"
description = u"Zooma 100%"
class Zoom200:
name = u"Zooma 200%"
description = u"Zooma 200%"
class Zoom50:
name = u"Zooma 50%"
description = u"Zooma 50%"
class Serial:
name = u"Serieport"
description = u"Kommunicera via serieporten"
baudrate = u"Hastighet:"
bytesize = u"Antal bitar:"
eventPrefix = u"Händelseprefix:"
flowcontrol = u"Flödesreglering:"
generateEvents = u"Generera händelse vid inkommande data"
handshakes = [
u"Ingen",
u"Xon / Xoff",
u"Hårdvara",
]
parities = [
u"Ingen paritet",
u"Udda",
u"Jämn",
]
parity = u"Paritet:"
port = u"Port:"
stopbits = u"Stopbitar:"
terminator = u"Terminator:"
class Read:
name = u"Läs data"
description = u"Läs data"
read_all = u"Läs så många byte som finns tillgängliga"
read_some = u"Läs exakt såhär många bytes:"
read_time = u"och vänta maximalt såhär många millisekunder på dom:"
class Write:
name = u"Skicka data"
description = u"Skicka data via serieporten\n\n\n<p>Du kan använda Pythonsträngar för att skicka icke skrivbara tecken.\n\n\nNågra exempel:\n<p>\n will skickar en linefeed (LF)<br>\r skickar en carriage return (CR)<br>\t skickar en tab<br>\x0B skickar ascii-tecknet för 0B (hexadecimalt)<br>\\ skickar en enstaka backslash."
class SysTrayMenu:
name = u"Meny i meddelandefältet (SysTray)"
description = u"Tillåter dig att lägga till ett eget menyinnehåll i EventGhosts meny i meddelandefältet (SysTray)"
addBox = u"Lägg till:"
addItemButton = u"Menyinnehåll"
addSeparatorButton = u"Separator"
deleteButton = u"Ta bort"
editEvent = u"Händelse:"
editLabel = u"Etikett:"
eventHeader = u"Händelse"
labelHeader = u"Etikett"
unnamedEvent = u"Händelse%s"
unnamedLabel = u"Nytt menyinnehåll %s"
class TellStick:
    # Swedish translations for the TellStick plugin.
    # NOTE: attribute values are user-visible UI strings and must stay
    # in Swedish; only these comments are English.
    name = u"TellStick"
    description = u'<p>Plugin för att kontrollera TellStick-kompatibla enheter.</p>\n\n<p><a href="http://www.telldus.se">Telldus Hemsida</a></p><center><img src="tellstick.png" /></center>'

    class TurnOff:
        # Strings for the "turn device off" action.
        name = u"Släck"
        description = u"Släcker en TellStick-enhet"

    class TurnOn:
        # Strings for the "turn device on" action.
        name = u"Tänd"
        description = u"Tänder en TellStick-enhet"
class Timer:
name = u"Timer"
description = u"Triggar en händelse efter en inställd tid och repeterar efter ett intervall om du så önskar"
colLabels = (
u"Namn",
u"Start tid",
u"Nästa händelse",
u"Namn på händelse",
u"Loop-räknare",
u"Loopar",
u"Intervall",
)
listhl = u"Nuvarande aktiva timrar:"
stopped = u"Plugin stoppat"
timerFinished = u"Timern är klar"
class TimerAction:
name = u"Starta ny eller kontrollera befintlig timer"
description = u" "
actions = (
u"Starta om timer med nuvarande inställningar",
u"Starta om timer (endast när den kör)",
u"Återställ loop-räknaren",
u"Avbryt",
)
addCounterToName = u"lägg till loop-räknaren till händelsens namn"
eventName = u"Händelsens namn:"
interval1 = u"Intervall:"
interval2 = u"sekunder"
labelStart = u'Starta timer "%s" (%s loopar, %.2f sekunders intervall)'
labelStartOneTime = u'Starta timer "%s"'
labelStartUnlimited = u'Starta timer "%s" (oändligt antal loopar, %.2f sekunders intervall)'
labels = (
u'Starta om timer "%s"',
u'Starta om timer "%s" om den fortfarande kör',
u'Återställ räknaren för timer "%s"',
u'Avbryt timer "%s"',
)
loop1 = u"Loopar:"
loop2 = u"(0 = obegränsat)"
showRemaingLoopsText = u"loop-räknare visar antal återstående loopar"
start = u"Starta ny timer (nuvarande timer med samma namn avbryts)"
startTime = u"Starta:"
startTimeTypes = (
u"omgående",
u"Efter intervall-tiden",
u"vid angiven tid (TT:MM:SS)",
u"efter angiven varatighet (TT:MM:SS)",
u"nästa hel minut",
u"nästa hela fem minuter",
u"nästa hela kvart",
u"nästa hela halvtimme",
u"nästa hel timme",
)
timerName = u"Timerns namn:"
class Webserver:
    # Swedish translations for the Webserver plugin
    # (UI strings stay in Swedish; comments are English).
    name = u"Webserver"
    description = u"Implementerar en enkel webserver, som du kan använda för att generera händelser genom HTML-sidor"
    documentRoot = u"Dokument root:"
    eventPrefix = u"Händelseprefix:"
    port = u"Port:"
class Winamp:
name = u"Winamp"
description = u'Kontrollera <a href="http://www.winamp.com/">Winamp</a>.'
class ChangeRepeatStatus:
name = u"Ändra status på repetera"
description = u"Ändra status på repetera"
radioBoxLabel = u"Egenskaper"
radioBoxOptions = [
u"Ta bort repetera",
u"Sätt repetera",
u"Ändra repetera",
]
class ChangeShuffleStatus:
name = u"Ändra slump status"
description = u"Ändra slump status"
radioBoxLabel = u"Egenskaper"
radioBoxOptions = [
u"Ta bort slump",
u"Sätt slump",
u"Ändra slump",
]
class ChooseFile:
name = u"Välj fil"
description = u"Välj fil"
class DiscretePause:
name = u"Pausa"
description = u"Pausar Winamp om den spelar, men gör ingenting om den redan är pausad"
class ExVis:
name = u"Starta Visualisation"
description = u"Starta Visualisation"
class Exit:
name = u"Avsluta"
description = u"Avslutar Winamp"
class Fadeout:
name = u"Fada ut"
description = u"Fadar ut och stoppar"
class FastForward:
name = u"Hoppa framåt"
description = u"Hoppa 5 sekunder framåt"
class FastRewind:
name = u"Hoppa bakåt"
description = u"Hoppar 5 sekunder bakåt"
class NextTrack:
name = u"Nästa låt"
description = u"Hoppar till nästa låt i playlisten"
class Pause:
name = u"Pausa"
description = u"Pausar"
class Play:
name = u"Play"
description = u"Play"
class PreviousTrack:
name = u"Föregående låt"
description = u"Hoppar till föregående låt i playlisten"
class SetVolume:
name = u"Ställ volymen"
description = u"Ställ volymen"
class ShowFileinfo:
name = u"Visa filinformation"
description = u"Visa filinformation"
class Stop:
name = u"Stoppa"
description = u"Stoppar"
class TogglePlay:
name = u"Toggla play"
description = u"Togglar mellan play och pause"
class ToggleRepeat:
name = u"Toggla repetera"
description = u"Toggla repetera"
class ToggleShuffle:
name = u"Toggla slump"
description = u"Toggla slump"
class VolumeDown:
name = u"Volym ner"
description = u"Sänker volymen med 1%"
class VolumeUp:
name = u"Volym upp"
description = u"Höjer volymen med 1%"
|
badreddinetahir/pwn_plug_sources | refs/heads/master | src/theharvester/discovery/googlesearch.py | 8 | import string
import httplib, sys
import parser
import re
import time
class search_google:
    """Scrape Google web search for a target word/domain.

    Raw result HTML is accumulated in ``self.totalresults``; the
    companion ``parser`` module then extracts e-mails, hostnames,
    file URLs and Google-profile names from it.
    NOTE: Python 2 only (legacy ``httplib`` API, print statements).
    """

    def __init__(self,word,limit,start):
        # word: the domain/keyword to harvest for
        # limit: maximum number of results to page through
        # start: index of the first result (allows resuming a run)
        self.word=word
        # File extension used by get_files() when extracting document URLs.
        self.files="pdf"
        self.results=""
        self.totalresults=""
        self.server="www.google.com"
        self.hostname="www.google.com"
        # Impersonate a desktop Firefox so Google serves normal HTML.
        self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
        # Results requested per page (Google's maximum).
        self.quantity="100"
        self.limit=limit
        # Running offset into the result pages.
        self.counter=start

    def do_search(self):
        """Fetch one result page for the e-mail style query %40"<word>"."""
        # httplib.HTTP is the legacy (pre-HTTPConnection) Python 2 interface.
        h = httplib.HTTP(self.server)
        h.putrequest('GET', "/search?num="+self.quantity+"&start=" + str(self.counter) + "&hl=en&meta=&q=%40\"" + self.word + "\"")
        h.putheader('Host', self.hostname)
        h.putheader('User-agent', self.userAgent)
        h.endheaders()
        returncode, returnmsg, headers = h.getreply()
        self.results = h.getfile().read()
        # Keep every page so the parser can process all results at once.
        self.totalresults+= self.results

    def do_search_files(self,files):
        """Fetch one page of 'filetype:<files> site:<word>' results."""
        h = httplib.HTTP(self.server)
        h.putrequest('GET', "/search?num="+self.quantity+"&start=" + str(self.counter) + "&hl=en&meta=&q=filetype:"+files+"%20site:" + self.word)
        h.putheader('Host', self.hostname)
        h.putheader('User-agent', self.userAgent)
        h.endheaders()
        returncode, returnmsg, headers = h.getreply()
        self.results = h.getfile().read()
        self.totalresults+= self.results

    def do_search_profiles(self):
        """Fetch one page of Google Profiles mentioning the target word."""
        h = httplib.HTTP(self.server)
        h.putrequest('GET', '/search?num='+ self.quantity + '&start=' + str(self.counter) + '&hl=en&meta=&q=site:www.google.com%20intitle:"Google%20Profile"%20"Companies%20I%27ve%20worked%20for"%20"at%20' + self.word + '"')
        h.putheader('Host', self.hostname)
        h.putheader('User-agent', self.userAgent)
        h.endheaders()
        returncode, returnmsg, headers = h.getreply()
        self.results = h.getfile().read()
        self.totalresults+= self.results

    def check_next(self):
        """Return "1" if the last fetched page links to a further page."""
        renext = re.compile('> Next <')
        nextres=renext.findall(self.results)
        if nextres !=[]:
            nexty="1"
        else:
            nexty="0"
        return nexty

    def get_emails(self):
        """Parse accumulated pages and return harvested e-mail addresses."""
        rawres=parser.parser(self.totalresults,self.word)
        return rawres.emails()

    def get_hostnames(self):
        """Parse accumulated pages and return harvested hostnames."""
        rawres=parser.parser(self.totalresults,self.word)
        return rawres.hostnames()

    def get_files(self):
        """Parse accumulated pages and return URLs of self.files documents."""
        rawres=parser.parser(self.totalresults,self.word)
        return rawres.fileurls(self.files)

    def get_profiles(self):
        """Parse accumulated pages and return harvested profile names."""
        rawres=parser.parser(self.totalresults,self.word)
        return rawres.profiles()

    def process(self):
        """Page through e-mail search results up to self.limit."""
        while self.counter <= self.limit:
            self.do_search()
            # NOTE(review): 'more' is computed but never used here (unlike
            # process_profiles) -- paging continues regardless of a Next link.
            more = self.check_next()
            # Throttle requests to reduce the chance of Google blocking us.
            time.sleep(1)
            self.counter+=100
            print "\tSearching "+ str(self.counter) + " results..."

    def process_files(self,files):
        """Page through filetype search results up to self.limit."""
        while self.counter <= self.limit:
            self.do_search_files(files)
            time.sleep(1)
            self.counter+=100
            print "\tSearching "+ str(self.counter) + " results..."

    def process_profiles(self):
        """Page through profile results, stopping when no Next link remains."""
        while self.counter < self.limit:
            self.do_search_profiles()
            time.sleep(0.3)
            more = self.check_next()
            if more == "1":
                self.counter+=100
            else:
                break
|
FLIHABI/Farango | refs/heads/bleeding | check/tests/bind/good/rec_function.py | 1 | input =b"""
fun a() : int;
fun b() : int = a() + 1;
fun a() : int = b() + 1;
"""
rules = [ 'compare_exit_status' ]
|
raildo/keystone | refs/heads/master | keystone/middleware/ec2_token.py | 4 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import urlparse
from eventlet.green import httplib
from oslo.config import cfg
import webob.dec
import webob.exc
from keystone.common import config
from keystone.common import wsgi
from keystone.openstack.common import jsonutils
# Configuration option telling the middleware where keystone's ec2tokens
# endpoint lives, i.e. where an EC2-signed request is exchanged for a token.
keystone_ec2_opts = [
    cfg.StrOpt('keystone_ec2_url',
               default='http://localhost:5000/v2.0/ec2tokens',
               help='URL to get token from ec2 request.'),
]

CONF = config.CONF
# Register the option so CONF.keystone_ec2_url is resolvable at runtime.
CONF.register_opts(keystone_ec2_opts)
class EC2Token(wsgi.Middleware):
    """WSGI middleware that validates an EC2-signed request with keystone.

    The AWS-style signature carried by the request is forwarded to
    keystone's ec2tokens endpoint; on success the resulting token id is
    attached to the request as ``X-Auth-Token`` before it continues down
    the pipeline.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # A valid EC2 request must carry both a signature and an access
        # key id; anything else is malformed.
        try:
            signature = req.params['Signature']
            access = req.params['AWSAccessKeyId']
        except KeyError:
            raise webob.exc.HTTPBadRequest()

        # The signature signs every query parameter except itself, so it
        # is stripped from the copy handed to keystone for verification.
        auth_params = dict(req.params)
        del auth_params['Signature']

        credentials = {
            'ec2Credentials': {
                'access': access,
                'signature': signature,
                'host': req.host,
                'verb': req.method,
                'path': req.path,
                'params': auth_params,
            }
        }
        body = jsonutils.dumps(credentials)

        # POST the credentials to keystone's ec2tokens endpoint.
        # Disable 'has no x member' pylint error for httplib and urlparse.
        # pylint: disable-msg=E1101
        parsed = urlparse.urlparse(CONF.keystone_ec2_url)
        if parsed.scheme == 'http':
            connection = httplib.HTTPConnection(parsed.netloc)
        else:
            connection = httplib.HTTPSConnection(parsed.netloc)
        connection.request('POST', parsed.path, body=body,
                           headers={'Content-Type': 'application/json'})
        raw_response = connection.getresponse().read()
        connection.close()

        # NOTE(vish): We could save a call to keystone by
        #             having keystone return token, tenant,
        #             user, and roles from this call.
        result = jsonutils.loads(raw_response)
        try:
            token_id = result['access']['token']['id']
        except (AttributeError, KeyError):
            raise webob.exc.HTTPBadRequest()

        # Authenticated -- expose the token to the rest of the pipeline.
        req.headers['X-Auth-Token'] = token_id
        return self.application
|
chenjun0210/tensorflow | refs/heads/master | tensorflow/python/platform/logging_test.py | 210 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
class EventLoaderTest(googletest.TestCase):
    """Smoke test for the tf_logging wrapper module."""

    def test_log(self):
        # Just check that logging works without raising an exception.
        logging.error("test log message")


if __name__ == "__main__":
    # Allow running this test file directly, outside a test runner.
    googletest.main()
|
procangroup/edx-platform | refs/heads/master | common/lib/sandbox-packages/eia.py | 193 | """
Standard resistor values.
Commonly used for verifying electronic components in circuit classes are
standard values, or conversely, for generating realistic component
values in parameterized problems. For details, see:
http://en.wikipedia.org/wiki/Electronic_color_code
"""
# pylint: disable=invalid-name
# r is standard name for a resistor. We would like to use it as such.
import math
import numbers
# Preferred-number series. E6/E12/E24 list two significant figures
# (20%/10%/5% tolerance parts); E48/E96/E192 list three significant
# figures (2%/1%/0.5% tolerance parts).
E6 = [10, 15, 22, 33, 47, 68]
E12 = [10, 12, 15, 18, 22, 27, 33, 39, 47, 56, 68, 82]
E24 = [10, 12, 15, 18, 22, 27, 33, 39, 47, 56, 68, 82, 11, 13, 16, 20,
       24, 30, 36, 43, 51, 62, 75, 91]
E48 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 105,
       127, 154, 187, 226, 274, 332, 402, 487, 590, 715, 866, 110, 133,
       162, 196, 237, 287, 348, 422, 511, 619, 750, 909, 115, 140, 169,
       205, 249, 301, 365, 442, 536, 649, 787, 953]
E96 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 102,
       124, 150, 182, 221, 267, 324, 392, 475, 576, 698, 845, 105, 127,
       154, 187, 226, 274, 332, 402, 487, 590, 715, 866, 107, 130, 158,
       191, 232, 280, 340, 412, 499, 604, 732, 887, 110, 133, 162, 196,
       237, 287, 348, 422, 511, 619, 750, 909, 113, 137, 165, 200, 243,
       294, 357, 432, 523, 634, 768, 931, 115, 140, 169, 205, 249, 301,
       365, 442, 536, 649, 787, 953, 118, 143, 174, 210, 255, 309, 374,
       453, 549, 665, 806, 976]
E192 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 101,
        123, 149, 180, 218, 264, 320, 388, 470, 569, 690, 835, 102, 124,
        150, 182, 221, 267, 324, 392, 475, 576, 698, 845, 104, 126, 152,
        184, 223, 271, 328, 397, 481, 583, 706, 856, 105, 127, 154, 187,
        226, 274, 332, 402, 487, 590, 715, 866, 106, 129, 156, 189, 229,
        277, 336, 407, 493, 597, 723, 876, 107, 130, 158, 191, 232, 280,
        340, 412, 499, 604, 732, 887, 109, 132, 160, 193, 234, 284, 344,
        417, 505, 612, 741, 898, 110, 133, 162, 196, 237, 287, 348, 422,
        511, 619, 750, 909, 111, 135, 164, 198, 240, 291, 352, 427, 517,
        626, 759, 920, 113, 137, 165, 200, 243, 294, 357, 432, 523, 634,
        768, 931, 114, 138, 167, 203, 246, 298, 361, 437, 530, 642, 777,
        942, 115, 140, 169, 205, 249, 301, 365, 442, 536, 649, 787, 953,
        117, 142, 172, 208, 252, 305, 370, 448, 542, 657, 796, 965, 118,
        143, 174, 210, 255, 309, 374, 453, 549, 665, 806, 976, 120, 145,
        176, 213, 258, 312, 379, 459, 556, 673, 816, 988]


def iseia(r, valid_types=(E6, E12, E24)):
    """Return True when ``r`` matches a standard EIA component value.

    ``valid_types`` selects which preferred-number series to accept;
    the default checks the 5%-and-coarser series (E6/E12/E24).
    """
    # Reject anything that is not a plain, finite, non-negative number.
    if not isinstance(r, numbers.Number) or \
       r < 0 or \
       math.isnan(r) or \
       math.isinf(r):
        return False
    # Zero is always acceptable (a 0-ohm jumper is a standard part).
    if r == 0:
        return True
    # Scale by powers of ten until the value lies in [100, 1000).
    while r < 100:
        r *= 10
    while r >= 1000:
        r /= 10
    # A genuine EIA value must now be (nearly) an integer; allow a
    # little slack for accumulated floating point error.
    if abs(r - round(r)) > 0.01:
        return False
    r = int(round(r))
    # Accept either a direct 3-digit match (E48/E96/E192 tables) or a
    # trailing-zero match against a 2-digit table (E6/E12/E24).
    return any(
        r in series or ((r % 10) == 0 and (r // 10) in series)
        for series in valid_types
    )
if __name__ == '__main__':
    # Smoke tests. All of these should print True.
    # Fix: the original used Python 2 `print expr` statements, which are a
    # syntax error on Python 3 even though iseia() itself is version
    # agnostic.  print() with a single parenthesised argument behaves
    # identically on both interpreters, so the demo now runs everywhere.
    print(iseia(100))        # 100 ohm resistor is EIA
    print(not iseia(101))    # 101 is not
    print(not iseia(100.3))  # Floating point close to EIA is not EIA
    print(iseia(100.001))    # But within floating point error is
    print(iseia(1e5))        # We handle big numbers well
    print(iseia(2200))       # We handle middle-of-the-list well
    # We can handle 1% components correctly; 2.2k is EIA24, but not EIA48.
    print(not iseia(2200, (E48, E96, E192)))
    print(iseia(5490e2, (E48, E96, E192)))
    print(iseia(2200))
    print(not iseia(5490e2))
    print(iseia(1e-5))       # We handle little numbers well
    print(not iseia("Hello"))  # Junk handled okay
    print(not iseia(float('NaN')))
    print(not iseia(-1))
    print(not iseia(iseia))
    print(not iseia(float('Inf')))
    print(iseia(0))          # Corner case. 0 is a standard resistor value.
|
liyu1990/sklearn | refs/heads/master | sklearn/utils/graph.py | 289 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
|
SRabbelier/Melange | refs/heads/master | app/soc/logic/models/presence_with_tos.py | 2 | #!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PresenceWithToS (Model) query functions.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
from soc.logic.models import presence
import soc.models.presence_with_tos
class Logic(presence.Logic):
"""Logic methods for the PresenceWithToS model.
"""
def __init__(self, model=soc.models.presence_with_tos.PresenceWithToS,
base_model=None, scope_logic=presence):
"""Defines the name, key_name and model for this entity.
"""
super(Logic, self).__init__(model=model, base_model=base_model,
scope_logic=scope_logic)
logic = Logic()
|
alfa-addon/addon | refs/heads/master | plugin.video.alfa/channels/url.py | 1 | # -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="search", title=config.get_localized_string(60089), list_type='server'))
itemlist.append(Item(channel=item.channel, action="search", title=config.get_localized_string(60090), list_type='direct'))
itemlist.append(Item(channel=item.channel, action="search", title=config.get_localized_string(60091), list_type='findvideos'))
return itemlist
# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
def search(item, texto):
logger.info("texto=" + texto)
if not texto.startswith("http"):
texto = "http://" + texto
itemlist = []
if "server" in item.list_type:
itemlist = servertools.find_video_items(data=texto)
for item in itemlist:
item.channel = "url"
item.action = "play"
elif "direct" in item.list_type:
itemlist.append(Item(channel=item.channel, action="play", url=texto, server="directo", title=config.get_localized_string(60092)))
else:
data = httptools.downloadpage(texto).data
itemlist = servertools.find_video_items(data=data)
for item in itemlist:
item.channel = "url"
item.action = "play"
if len(itemlist) == 0:
itemlist.append(Item(channel=item.channel, action="search", title=config.get_localized_string(60093)))
return itemlist
|
bdang2012/taiga-back | refs/heads/master | taiga/projects/userstories/serializers.py | 1 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from taiga.base.api import serializers
from taiga.base.fields import TagsField
from taiga.base.fields import PgArrayField
from taiga.base.neighbors import NeighborsSerializerMixin
from taiga.base.utils import json
from taiga.mdrender.service import render as mdrender
from taiga.projects.validators import ProjectExistsValidator
from taiga.projects.validators import UserStoryStatusExistsValidator
from taiga.projects.userstories.validators import UserStoryExistsValidator
from taiga.projects.notifications.validators import WatchersValidator
from taiga.projects.serializers import BasicUserStoryStatusSerializer
from taiga.users.serializers import BasicInfoSerializer as UserBasicInfoSerializer
from . import models
class RolePointsField(serializers.WritableField):
def to_native(self, obj):
return {str(o.role.id): o.points.id for o in obj.all()}
def from_native(self, obj):
if isinstance(obj, dict):
return obj
return json.loads(obj)
class UserStorySerializer(WatchersValidator, serializers.ModelSerializer):
tags = TagsField(default=[], required=False)
external_reference = PgArrayField(required=False)
points = RolePointsField(source="role_points", required=False)
total_points = serializers.SerializerMethodField("get_total_points")
comment = serializers.SerializerMethodField("get_comment")
milestone_slug = serializers.SerializerMethodField("get_milestone_slug")
milestone_name = serializers.SerializerMethodField("get_milestone_name")
origin_issue = serializers.SerializerMethodField("get_origin_issue")
blocked_note_html = serializers.SerializerMethodField("get_blocked_note_html")
description_html = serializers.SerializerMethodField("get_description_html")
status_extra_info = BasicUserStoryStatusSerializer(source="status", required=False, read_only=True)
assigned_to_extra_info = UserBasicInfoSerializer(source="assigned_to", required=False, read_only=True)
class Meta:
model = models.UserStory
depth = 0
read_only_fields = ('created_date', 'modified_date')
def get_total_points(self, obj):
return obj.get_total_points()
def get_comment(self, obj):
# NOTE: This method and field is necessary to historical comments work
return ""
def get_milestone_slug(self, obj):
if obj.milestone:
return obj.milestone.slug
else:
return None
def get_milestone_name(self, obj):
if obj.milestone:
return obj.milestone.name
else:
return None
def get_origin_issue(self, obj):
if obj.generated_from_issue:
return {
"id": obj.generated_from_issue.id,
"ref": obj.generated_from_issue.ref,
"subject": obj.generated_from_issue.subject,
}
return None
def get_blocked_note_html(self, obj):
return mdrender(obj.project, obj.blocked_note)
def get_description_html(self, obj):
return mdrender(obj.project, obj.description)
class UserStoryListSerializer(UserStorySerializer):
class Meta:
model = models.UserStory
depth = 0
read_only_fields = ('created_date', 'modified_date')
exclude=("description", "description_html")
class UserStoryNeighborsSerializer(NeighborsSerializerMixin, UserStorySerializer):
def serialize_neighbor(self, neighbor):
return NeighborUserStorySerializer(neighbor).data
class NeighborUserStorySerializer(serializers.ModelSerializer):
class Meta:
model = models.UserStory
fields = ("id", "ref", "subject")
depth = 0
class UserStoriesBulkSerializer(ProjectExistsValidator, UserStoryStatusExistsValidator, serializers.Serializer):
project_id = serializers.IntegerField()
status_id = serializers.IntegerField(required=False)
bulk_stories = serializers.CharField()
## Order bulk serializers
class _UserStoryOrderBulkSerializer(UserStoryExistsValidator, serializers.Serializer):
us_id = serializers.IntegerField()
order = serializers.IntegerField()
class UpdateUserStoriesOrderBulkSerializer(ProjectExistsValidator, UserStoryStatusExistsValidator, serializers.Serializer):
project_id = serializers.IntegerField()
bulk_stories = _UserStoryOrderBulkSerializer(many=True)
|
TeachAtTUM/edx-platform | refs/heads/master | openedx/core/djangoapps/catalog/admin.py | 24 | """
Django admin bindings for catalog support models.
"""
from config_models.admin import ConfigurationModelAdmin
from django.contrib import admin
from openedx.core.djangoapps.catalog.models import CatalogIntegration
admin.site.register(CatalogIntegration, ConfigurationModelAdmin)
|
moijes12/oh-mainline | refs/heads/master | vendor/packages/gdata/tests/gdata_tests/marketplace/live_client_test.py | 39 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Alexandre Vivien <[email protected]>'
import unittest
import gdata.client
import gdata.data
import gdata.gauth
import gdata.marketplace.client
import gdata.marketplace.data
import gdata.test_config as conf
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
class LicensingClientTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
gdata.test_config.options.register(
'appsid',
'Enter the Application ID of your Marketplace application',
description='The Application ID of your Marketplace application')
gdata.test_config.options.register(
'appsconsumerkey',
'Enter the Consumer Key of your Marketplace application',
description='The Consumer Key of your Marketplace application')
gdata.test_config.options.register(
'appsconsumersecret',
'Enter the Consumer Secret of your Marketplace application',
description='The Consumer Secret of your Marketplace application')
def setUp(self):
self.client = gdata.marketplace.client.LicensingClient(domain='example.com')
if conf.options.get_value('runlive') == 'true':
self.client = gdata.marketplace.client.LicensingClient(domain=conf.options.get_value('appsdomain'))
conf.configure_client(self.client, 'LicensingClientTest', self.client.auth_service, True)
self.client.auth_token = gdata.gauth.TwoLeggedOAuthHmacToken(conf.options.get_value('appsconsumerkey'), conf.options.get_value('appsconsumersecret'), '')
self.client.source = 'GData-Python-Client-Test'
self.client.account_type='HOSTED'
self.client.http_client.debug = True
self.app_id = conf.options.get_value('appsid')
def tearDown(self):
conf.close_client(self.client)
def testGetLicense(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testGetLicense')
fetched_feed = self.client.GetLicense(app_id=self.app_id)
self.assertTrue(isinstance(fetched_feed, gdata.marketplace.data.LicenseFeed))
self.assertTrue(isinstance(fetched_feed.entry[0], gdata.marketplace.data.LicenseEntry))
entity = fetched_feed.entry[0].content.entity
self.assertTrue(entity is not None)
self.assertNotEqual(entity.id, '')
self.assertNotEqual(entity.enabled, '')
self.assertNotEqual(entity.customer_id, '')
self.assertNotEqual(entity.state, '')
def testGetLicenseNotifications(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testGetLicenseNotifications')
fetched_feed = self.client.GetLicenseNotifications(app_id=self.app_id, max_results=2)
self.assertTrue(isinstance(fetched_feed, gdata.marketplace.data.LicenseFeed))
self.assertEqual(len(fetched_feed.entry), 2)
for entry in fetched_feed.entry:
entity = entry.content.entity
self.assertTrue(entity is not None)
self.assertNotEqual(entity.id, '')
self.assertNotEqual(entity.domain_name, '')
self.assertNotEqual(entity.installer_email, '')
self.assertNotEqual(entity.tos_acceptance_time, '')
self.assertNotEqual(entity.last_change_time, '')
self.assertNotEqual(entity.product_config_id, '')
self.assertNotEqual(entity.state, '')
next_uri = fetched_feed.find_next_link()
fetched_feed_next = self.client.GetLicenseNotifications(uri=next_uri)
self.assertTrue(isinstance(fetched_feed_next, gdata.marketplace.data.LicenseFeed))
self.assertTrue(len(fetched_feed_next.entry) <= 2)
for entry in fetched_feed_next.entry:
entity = entry.content.entity
self.assertTrue(entity is not None)
self.assertNotEqual(entity.id, '')
self.assertNotEqual(entity.domain_name, '')
self.assertNotEqual(entity.installer_email, '')
self.assertNotEqual(entity.tos_acceptance_time, '')
self.assertNotEqual(entity.last_change_time, '')
self.assertNotEqual(entity.product_config_id, '')
self.assertNotEqual(entity.state, '')
def suite():
return conf.build_suite([LicensingClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
G-P-S/depot_tools | refs/heads/master | third_party/gsutil/gslib/commands/chacl.py | 50 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the chacl command to gsutil.
This command allows users to easily specify changes to access control lists.
"""
import random
import re
import time
from xml.dom import minidom
from boto.exception import GSResponseError
from boto.gs import acl
from gslib import name_expansion
from gslib.command import Command
from gslib.command import COMMAND_NAME
from gslib.command import COMMAND_NAME_ALIASES
from gslib.command import CONFIG_REQUIRED
from gslib.command import FILE_URIS_OK
from gslib.command import MAX_ARGS
from gslib.command import MIN_ARGS
from gslib.command import PROVIDER_URIS_OK
from gslib.command import SUPPORTED_SUB_ARGS
from gslib.command import URIS_START_ARG
from gslib.exception import CommandException
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HELP_TYPE
from gslib.help_provider import HelpType
from gslib.util import NO_MAX
from gslib.util import Retry
class ChangeType(object):
USER = 'User'
GROUP = 'Group'
class AclChange(object):
"""Represents a logical change to an access control list."""
public_scopes = ['AllAuthenticatedUsers', 'AllUsers']
id_scopes = ['UserById', 'GroupById']
email_scopes = ['UserByEmail', 'GroupByEmail']
domain_scopes = ['GroupByDomain']
scope_types = public_scopes + id_scopes + email_scopes + domain_scopes
permission_shorthand_mapping = {
'R': 'READ',
'W': 'WRITE',
'FC': 'FULL_CONTROL',
}
def __init__(self, acl_change_descriptor, scope_type, logger):
"""Creates an AclChange object.
acl_change_descriptor: An acl change as described in chacl help.
scope_type: Either ChangeType.USER or ChangeType.GROUP, specifying the
extent of the scope.
logger: An instance of ThreadedLogger.
"""
self.logger = logger
self.identifier = ''
self.raw_descriptor = acl_change_descriptor
self._Parse(acl_change_descriptor, scope_type)
self._Validate()
def __str__(self):
return 'AclChange<{0}|{1}|{2}>'.format(self.scope_type, self.perm,
self.identifier)
def _Parse(self, change_descriptor, scope_type):
"""Parses an ACL Change descriptor."""
def _ClassifyScopeIdentifier(text):
re_map = {
'AllAuthenticatedUsers': r'^(AllAuthenticatedUsers|AllAuth)$',
'AllUsers': '^(AllUsers|All)$',
'Email': r'^.+@.+\..+$',
'Id': r'^[0-9A-Fa-f]{64}$',
'Domain': r'^[^@]+\..+$',
}
for type_string, regex in re_map.items():
if re.match(regex, text, re.IGNORECASE):
return type_string
if change_descriptor.count(':') != 1:
raise CommandException('{0} is an invalid change description.'
.format(change_descriptor))
scope_string, perm_token = change_descriptor.split(':')
perm_token = perm_token.upper()
if perm_token in self.permission_shorthand_mapping:
self.perm = self.permission_shorthand_mapping[perm_token]
else:
self.perm = perm_token
scope_class = _ClassifyScopeIdentifier(scope_string)
if scope_class == 'Domain':
# This may produce an invalid UserByDomain scope,
# which is good because then validate can complain.
self.scope_type = '{0}ByDomain'.format(scope_type)
self.identifier = scope_string
elif scope_class in ['Email', 'Id']:
self.scope_type = '{0}By{1}'.format(scope_type, scope_class)
self.identifier = scope_string
elif scope_class == 'AllAuthenticatedUsers':
self.scope_type = 'AllAuthenticatedUsers'
elif scope_class == 'AllUsers':
self.scope_type = 'AllUsers'
else:
# This is just a fallback, so we set it to something
# and the validate step has something to go on.
self.scope_type = scope_string
def _Validate(self):
"""Validates a parsed AclChange object."""
def _ThrowError(msg):
raise CommandException('{0} is not a valid ACL change\n{1}'
.format(self.raw_descriptor, msg))
if self.scope_type not in self.scope_types:
_ThrowError('{0} is not a valid scope type'.format(self.scope_type))
if self.scope_type in self.public_scopes and self.identifier:
_ThrowError('{0} requires no arguments'.format(self.scope_type))
if self.scope_type in self.id_scopes and not self.identifier:
_ThrowError('{0} requires an id'.format(self.scope_type))
if self.scope_type in self.email_scopes and not self.identifier:
_ThrowError('{0} requires an email address'.format(self.scope_type))
if self.scope_type in self.domain_scopes and not self.identifier:
_ThrowError('{0} requires domain'.format(self.scope_type))
if self.perm not in self.permission_shorthand_mapping.values():
perms = ', '.join(self.permission_shorthand_mapping.values())
_ThrowError('Allowed permissions are {0}'.format(perms))
def _YieldMatchingEntries(self, current_acl):
"""Generator that yields entries that match the change descriptor.
current_acl: An instance of bogo.gs.acl.ACL which will be searched
for matching entries.
"""
for entry in current_acl.entries.entry_list:
if entry.scope.type == self.scope_type:
if self.scope_type in ['UserById', 'GroupById']:
if self.identifier == entry.scope.id:
yield entry
elif self.scope_type in ['UserByEmail', 'GroupByEmail']:
if self.identifier == entry.scope.email_address:
yield entry
elif self.scope_type == 'GroupByDomain':
if self.identifier == entry.scope.domain:
yield entry
elif self.scope_type in ['AllUsers', 'AllAuthenticatedUsers']:
yield entry
else:
raise CommandException('Found an unrecognized ACL '
'entry type, aborting.')
def _AddEntry(self, current_acl):
"""Adds an entry to an ACL."""
if self.scope_type in ['UserById', 'UserById', 'GroupById']:
entry = acl.Entry(type=self.scope_type, permission=self.perm,
id=self.identifier)
elif self.scope_type in ['UserByEmail', 'GroupByEmail']:
entry = acl.Entry(type=self.scope_type, permission=self.perm,
email_address=self.identifier)
elif self.scope_type == 'GroupByDomain':
entry = acl.Entry(type=self.scope_type, permission=self.perm,
domain=self.identifier)
else:
entry = acl.Entry(type=self.scope_type, permission=self.perm)
current_acl.entries.entry_list.append(entry)
def Execute(self, uri, current_acl):
"""Executes the described change on an ACL.
uri: The URI object to change.
current_acl: An instance of boto.gs.acl.ACL to permute.
"""
self.logger.debug('Executing {0} on {1}'
.format(self.raw_descriptor, uri))
if self.perm == 'WRITE' and uri.names_object():
self.logger.warn(
'Skipping {0} on {1}, as WRITE does not apply to objects'
.format(self.raw_descriptor, uri))
return 0
matching_entries = list(self._YieldMatchingEntries(current_acl))
change_count = 0
if matching_entries:
for entry in matching_entries:
if entry.permission != self.perm:
entry.permission = self.perm
change_count += 1
else:
self._AddEntry(current_acl)
change_count = 1
parsed_acl = minidom.parseString(current_acl.to_xml())
self.logger.debug('New Acl:\n{0}'.format(parsed_acl.toprettyxml()))
return change_count
class AclDel(AclChange):
"""Represents a logical change from an access control list."""
scope_regexes = {
r'All(Users)?': 'AllUsers',
r'AllAuth(enticatedUsers)?': 'AllAuthenticatedUsers',
}
def __init__(self, identifier, logger):
self.raw_descriptor = '-d {0}'.format(identifier)
self.logger = logger
self.identifier = identifier
for regex, scope in self.scope_regexes.items():
if re.match(regex, self.identifier, re.IGNORECASE):
self.identifier = scope
self.scope_type = 'Any'
self.perm = 'NONE'
def _YieldMatchingEntries(self, current_acl):
for entry in current_acl.entries.entry_list:
if self.identifier == entry.scope.id:
yield entry
elif self.identifier == entry.scope.email_address:
yield entry
elif self.identifier == entry.scope.domain:
yield entry
elif self.identifier == 'AllUsers' and entry.scope.type == 'AllUsers':
yield entry
elif (self.identifier == 'AllAuthenticatedUsers'
and entry.scope.type == 'AllAuthenticatedUsers'):
yield entry
def Execute(self, uri, current_acl):
self.logger.debug('Executing {0} on {1}'
.format(self.raw_descriptor, uri))
matching_entries = list(self._YieldMatchingEntries(current_acl))
for entry in matching_entries:
current_acl.entries.entry_list.remove(entry)
parsed_acl = minidom.parseString(current_acl.to_xml())
self.logger.debug('New Acl:\n{0}'.format(parsed_acl.toprettyxml()))
return len(matching_entries)
_detailed_help_text = ("""
<B>SYNOPSIS</B>
gsutil chacl [-R] -u|-g|-d <grant>... uri...
where each <grant> is one of the following forms:
-u <id|email>:<perm>
-g <id|email|domain|All|AllAuth>:<perm>
-d <id|email|domain|All|AllAuth>
<B>DESCRIPTION</B>
The chacl command updates access control lists, similar in spirit to the Linux
chmod command. You can specify multiple access grant additions and deletions
in a single command run; all changes will be made atomically to each object in
turn. For example, if the command requests deleting one grant and adding a
different grant, the ACLs being updated will never be left in an intermediate
state where one grant has been deleted but the second grant not yet added.
Each change specifies a user or group grant to add or delete, and for grant
additions, one of R, W, FC (for the permission to be granted). A more formal
description is provided in a later section; below we provide examples.
Note: If you want to set a simple "canned" ACL on each object (such as
project-private or public), or if you prefer to edit the XML representation
for ACLs, you can do that with the setacl command (see 'gsutil help setacl').
<B>EXAMPLES</B>
Grant the user [email protected] WRITE access to the bucket
example-bucket:
gsutil chacl -u [email protected]:WRITE gs://example-bucket
Grant the group [email protected] FULL_CONTROL access to all jpg files in
the top level of example-bucket:
gsutil chacl -g [email protected]:FC gs://example-bucket/*.jpg
Grant the user with the specified canonical ID READ access to all objects in
example-bucket that begin with folder/:
gsutil chacl -R \\
-u 84fac329bceSAMPLE777d5d22b8SAMPLE77d85ac2SAMPLE2dfcf7c4adf34da46:R \\
gs://example-bucket/folder/
Grant all users from my-domain.org READ access to the bucket
gcs.my-domain.org:
gsutil chacl -g my-domain.org:R gs://gcs.my-domain.org
Remove any current access by [email protected] from the bucket
example-bucket:
gsutil chacl -d [email protected] gs://example-bucket
If you have a large number of objects to update, enabling multi-threading with
the gsutil -m flag can significantly improve performance. The following
command adds FULL_CONTROL for [email protected] using multi-threading:
gsutil -m chacl -R -u [email protected]:FC gs://example-bucket
Grant READ access to everyone from my-domain.org and to all authenticated
users, and grant FULL_CONTROL to [email protected], for the buckets
my-bucket and my-other-bucket, with multi-threading enabled:
gsutil -m chacl -R -g my-domain.org:R -g AllAuth:R \\
-u [email protected]:FC gs://my-bucket/ gs://my-other-bucket
<B>SCOPES</B>
There are four different scopes: Users, Groups, All Authenticated Users, and
All Users.
Users are added with -u and a plain ID or email address, as in
"-u [email protected]:r"
Groups are like users, but specified with the -g flag, as in
"-g [email protected]:fc". Groups may also be specified as a full
domain, as in "-g my-company.com:r".
AllAuthenticatedUsers and AllUsers are specified directly, as
in "-g AllUsers:R" or "-g AllAuthenticatedUsers:FC". These are case
insensitive, and may be shortened to "all" and "allauth", respectively.
Removing permissions is specified with the -d flag and an ID, email
address, domain, or one of AllUsers or AllAuthenticatedUsers.
Many scopes can be specified on the same command line, allowing bundled
changes to be executed in a single run. This will reduce the number of
requests made to the server.
<B>PERMISSIONS</B>
You may specify the following permissions with either their shorthand or
their full name:
R: READ
W: WRITE
FC: FULL_CONTROL
<B>OPTIONS</B>
-R, -r Performs chacl request recursively, to all objects under the
specified URI.
-u Add or modify a user permission as specified in the SCOPES
and PERMISSIONS sections.
-g Add or modify a group permission as specified in the SCOPES
and PERMISSIONS sections.
-d Remove all permissions associated with the matching argument, as
specified in the SCOPES and PERMISSIONS sections.
""")
class ChAclCommand(Command):
  """Implementation of gsutil chacl command.

  Parses the -u/-g/-d sub-options into ACL change objects, expands the
  URI arguments (optionally recursively with -R/-r), and applies the
  resulting changes to each matching bucket or object ACL.
  """

  # Command specification (processed by parent class).
  command_spec = {
    # Name of command.
    COMMAND_NAME : 'chacl',
    # List of command name aliases.
    COMMAND_NAME_ALIASES : [],
    # Min number of args required by this command.
    MIN_ARGS : 1,
    # Max number of args required by this command, or NO_MAX.
    MAX_ARGS : NO_MAX,
    # Getopt-style string specifying acceptable sub args.
    # NOTE(review): 'f' is accepted here but never handled in RunCommand —
    # confirm whether it is deliberately ignored for compatibility.
    SUPPORTED_SUB_ARGS : 'Rrfg:u:d:',
    # True if file URIs acceptable for this command.
    FILE_URIS_OK : False,
    # True if provider-only URIs acceptable for this command.
    PROVIDER_URIS_OK : False,
    # Index in args of first URI arg.
    URIS_START_ARG : 1,
    # True if must configure gsutil before running command.
    CONFIG_REQUIRED : True,
  }
  help_spec = {
    # Name of command or auxiliary help info for which this help applies.
    HELP_NAME : 'chacl',
    # List of help name aliases.
    HELP_NAME_ALIASES : ['chmod'],
    # Type of help:
    HELP_TYPE : HelpType.COMMAND_HELP,
    # One line summary of this help.
    HELP_ONE_LINE_SUMMARY : 'Add / remove entries on bucket and/or object ACLs',
    # The full help text.
    HELP_TEXT : _detailed_help_text,
  }

  # Command entry point.
  def RunCommand(self):
    """This is the point of entry for the chacl command.

    Returns:
      0 on success; raises CommandException on invalid flags, non-gs URIs,
      or when any threaded ACL update failed.
    """
    self.parse_versions = True
    # Translate each -g/-u/-d sub-option into an ACL change object.
    self.changes = []
    if self.sub_opts:
      for o, a in self.sub_opts:
        if o == '-g':
          self.changes.append(AclChange(a, scope_type=ChangeType.GROUP,
                                        logger=self.THREADED_LOGGER))
        if o == '-u':
          self.changes.append(AclChange(a, scope_type=ChangeType.USER,
                                        logger=self.THREADED_LOGGER))
        if o == '-d':
          self.changes.append(AclDel(a, logger=self.THREADED_LOGGER))
    if not self.changes:
      raise CommandException(
          'Please specify at least one access change '
          'with the -g, -u, or -d flags')

    # chacl is only meaningful for Google Cloud Storage (gs://) URIs.
    storage_uri = self.UrisAreForSingleProvider(self.args)
    if not (storage_uri and storage_uri.get_provider().name == 'google'):
      raise CommandException('The "{0}" command can only be used with gs:// URIs'
                             .format(self.command_name))

    # Collect the URIs to process in bulk (threaded); bucket URIs are
    # either expanded to their contents (-R) or applied immediately.
    bulk_uris = set()
    for uri_arg in self.args:
      for result in self.WildcardIterator(uri_arg):
        uri = result.uri
        if uri.names_bucket():
          if self.recursion_requested:
            bulk_uris.add(uri.clone_replace_name('*').uri)
          else:
            # If applying to a bucket directly, the threading machinery will
            # break, so we have to apply now, in the main thread.
            self.ApplyAclChanges(uri)
        else:
          bulk_uris.add(uri_arg)

    try:
      name_expansion_iterator = name_expansion.NameExpansionIterator(
          self.command_name, self.proj_id_handler, self.headers, self.debug,
          self.bucket_storage_uri_class, bulk_uris, self.recursion_requested)
    except CommandException as e:
      # NameExpansionIterator will complain if there are no URIs, but we don't
      # want to throw an error if we handled bucket URIs.
      if e.reason == 'No URIs matched':
        return 0
      else:
        raise e

    # Fan the per-object work out to worker threads; the exception handler
    # flips everything_set_okay so failures are surfaced at the end.
    self.everything_set_okay = True
    self.Apply(self.ApplyAclChanges,
               name_expansion_iterator,
               self._ApplyExceptionHandler)
    if not self.everything_set_okay:
      raise CommandException('ACLs for some objects could not be set.')

    return 0

  def _ApplyExceptionHandler(self, exception):
    """Records a worker-thread failure so RunCommand reports it as an error."""
    self.THREADED_LOGGER.error('Encountered a problem: {0}'.format(exception))
    self.everything_set_okay = False

  @Retry(GSResponseError, tries=3, delay=1, backoff=2)
  def ApplyAclChanges(self, uri_or_expansion_result):
    """Applies the changes in self.changes to the provided URI.

    Accepts either a StorageUri or a NameExpansionResult (as produced by
    the threaded Apply machinery).  Retried up to 3 times on
    GSResponseError, e.g. when the generation-match precondition fails.
    """
    if isinstance(uri_or_expansion_result, name_expansion.NameExpansionResult):
      uri = self.suri_builder.StorageUri(
          uri_or_expansion_result.expanded_uri_str)
    else:
      uri = uri_or_expansion_result

    try:
      current_acl = uri.get_acl()
    except GSResponseError as e:
      # NOTE(review): a failed ACL fetch only warns and returns; it does not
      # clear everything_set_okay, so it is not reported as an overall
      # failure — confirm that this best-effort behavior is intentional.
      self.THREADED_LOGGER.warning('Failed to set acl for {0}: {1}'
                                   .format(uri, e.reason))
      return

    modification_count = 0
    for change in self.changes:
      modification_count += change.Execute(uri, current_acl)
    if modification_count == 0:
      self.THREADED_LOGGER.info('No changes to {0}'.format(uri))
      return

    # TODO: Remove the concept of forcing when boto provides access to
    # bucket generation and meta_generation.
    headers = dict(self.headers)
    force = uri.names_bucket()
    if not force:
      # Guard against concurrent modification: only write the ACL if the
      # object generation/metageneration are unchanged since get_acl().
      key = uri.get_key()
      headers['x-goog-if-generation-match'] = key.generation
      headers['x-goog-if-metageneration-match'] = key.meta_generation

    # If this fails because of a precondition, it will raise a
    # GSResponseError for @Retry to handle.
    uri.set_acl(current_acl, uri.object_name, False, headers)
    self.THREADED_LOGGER.info('Updated ACL on {0}'.format(uri))
|
Spacecraft-Code/SPELL | refs/heads/master | drivers/example/src/__init__.py | 2 | ###################################################################################
## MODULE : __init__
## DATE : Mar 18, 2011
## PROJECT : SPELL
## DESCRIPTION: Module initialization
## --------------------------------------------------------------------------------
##
## Copyright (C) 2008, 2015 SES ENGINEERING, Luxembourg S.A.R.L.
##
## This file is part of SPELL.
##
## This component is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with SPELL. If not, see <http://www.gnu.org/licenses/>.
##
###################################################################################
|
romankagan/DDBWorkbench | refs/heads/master | python/testData/intentions/PyStringConcatenationToFormatIntentionTest/escapingPy3_after.py | 83 | string = "string"
some_string = "some \\ \" escaping {0}".format(string)
|
pchauncey/ansible | refs/heads/devel | lib/ansible/plugins/action/netconf_config.py | 118 | #
# Copyright 2016 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.plugins.action.net_config import ActionModule as NetActionModule
class ActionModule(NetActionModule, ActionBase):
    """Action plugin for the ``netconf_config`` module.

    Thin subclass that inherits all of its behavior from the generic
    ``net_config`` action plugin; nothing is overridden here.
    """
    pass
|
mdda/fossasia-2016_deep-learning | refs/heads/master | notebooks/models/imagenet_theano/imagenet.py | 2 |
def get_synset(path='../data/imagenet_synset_words.txt'):
    """Load ImageNet class descriptions from a synset-words file.

    Each well-formed line has the form ``<synset_id> <description>``
    (e.g. ``n01440764 tench, Tinca tinca``).  The synset id (the first
    whitespace-delimited token) is dropped and the remainder of the
    line is kept.

    Blank lines and lines without a description are skipped, so a
    trailing newline in the file no longer raises an IndexError (the
    previous ``split(' ', 1)[1]`` crashed on any line with no space).

    :param path: path to the synset-words text file.
    :return: list of description strings, in file order.
    """
    descriptions = []
    with open(path, 'r') as f:
        for line in f:
            # Strip off the first word (until space, maxsplit=1);
            # the description is the remainder, if present.
            parts = line.strip().split(' ', 1)
            if len(parts) == 2:
                descriptions.append(parts[1])
    return descriptions
|
abdoosh00/edraak | refs/heads/master | cms/lib/xblock/test/test_runtime.py | 39 | """
Tests of edX Studio runtime functionality
"""
from urlparse import urlparse
from mock import Mock
from unittest import TestCase
from cms.lib.xblock.runtime import handler_url
class TestHandlerUrl(TestCase):
    """Tests for the Studio ``handler_url`` implementation."""

    def setUp(self):
        self.block = Mock()

    def test_trailing_charecters(self):
        # No generated URL may end with a dangling '?' or '/'.
        generated = [
            handler_url(self.block, 'handler'),
            handler_url(self.block, 'handler', 'suffix'),
            handler_url(self.block, 'handler', 'suffix', 'query'),
            handler_url(self.block, 'handler', query='query'),
        ]
        for url in generated:
            self.assertFalse(url.endswith('?'))
            self.assertFalse(url.endswith('/'))

    def _parsed_query(self, query_string):
        """Return the parsed query string from a handler_url generated with the supplied query_string"""
        url = handler_url(self.block, 'handler', query=query_string)
        return urlparse(url).query

    def test_query_string(self):
        # Each supplied query string must survive into the generated URL.
        for query in ('foo=bar', 'foo=bar&baz=true', 'foo&bar&baz'):
            self.assertIn(query, self._parsed_query(query))

    def _parsed_path(self, handler_name='handler', suffix=''):
        """Return the parsed path from a handler_url with the supplied handler_name and suffix"""
        url = handler_url(self.block, handler_name, suffix=suffix)
        return urlparse(url).path

    def test_suffix(self):
        # The suffix must be appended verbatim at the end of the path.
        for suffix in ('foo', 'foo/bar', '/foo/bar'):
            self.assertTrue(self._parsed_path(suffix=suffix).endswith(suffix))

    def test_handler_name(self):
        # The handler name must appear somewhere in the path.
        for name in ('handler1', 'handler_a'):
            self.assertIn(name, self._parsed_path(name))
|
jgors/duecredit | refs/heads/master | duecredit/cmdline/cmd_test.py | 1 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Run internal DueCredit (unit)tests to verify correct operation on the system"""
__docformat__ = 'restructuredtext'
# magic line for manpage summary
# man: -*- % run unit-tests
from .helpers import parser_add_common_args
import nose
def setup_parser(parser):
    """Configure the argparse *parser* for the ``test`` sub-command.

    Currently a no-op; intended to grow options (verbosity, etc.) later.
    """
    # TODO -- pass options such as verbosity etc
    pass
def run(args):
    """Entry point for ``duecredit test``.

    Not implemented yet: raises NotImplementedError and directs users to
    run the suite via nose directly.

    :param args: parsed command-line arguments (currently unused).
    :raises NotImplementedError: always.
    """
    import duecredit
    raise NotImplementedError("Just use nosetests duecredit for now")
    #duecredit.test()
|
Chris7/pyquant | refs/heads/master | setup.py | 1 | from __future__ import print_function
import os

# These exceptions are for building pyquant on lambdas, which parse
# the setup.py file. Sane builders will never hit this
include_dirs = []
try:
    import numpy
    # numpy headers are needed to compile the Cython extension below.
    include_dirs.append(numpy.get_include())
except ImportError:
    pass

from setuptools import (
    Extension,
    setup,
    find_packages,
)  # noqa: E402

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

# Cython extension holding the C-accelerated peak routines.
extensions = [Extension(name="pyquant.cpeaks", sources=["pyquant/cpeaks.pyx"])]

setup(
    name="pyquant",
    version="0.4.1",
    packages=find_packages(),
    scripts=["scripts/pyQuant"],
    entry_points={"console_scripts": ["pyQuant = pyquant.command_line:run_pyquant",]},
    install_requires=[
        "cython",
        "numpy",
        "scipy >= 0.18.*",
        "patsy",
        "pythomics >= 0.3.41",
        "pandas >= 0.24.0",
        "lxml",
        "scikit-learn",
        "simplejson",
    ],
    include_package_data=True,
    description="A framework for the analysis of quantitative mass spectrometry data",
    url="http://www.github.com/chris7/pyquant",
    author="Chris Mitchell <[email protected]>",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
    ],
    # Build-time requirements that must be present before compilation.
    setup_requires=["cython", "numpy"],
    ext_modules=extensions,
    include_dirs=include_dirs,
)
|
algorythmic/bash-completion | refs/heads/master | test/t/test_python3.py | 2 | import pytest
class TestPython3:
    """Bash-completion tests for the ``python3`` command.

    Each test is driven by the ``@pytest.mark.complete`` decorator, which
    runs completion on the given command line and injects the result as
    the ``completion`` fixture.
    """

    @pytest.mark.complete("python3 ")
    def test_1(self, completion):
        # Bare command line should complete to something.
        assert completion

    @pytest.mark.complete("python3 -", require_cmd=True)
    def test_2(self, completion):
        # More than one option should be offered after '-'.
        assert len(completion) > 1

    @pytest.mark.complete("python3 -c ")
    def test_3(self, completion):
        # The argument to -c is inline code: no completion expected.
        assert not completion

    @pytest.mark.complete("python3 shared/default/")
    def test_4(self, completion):
        assert completion == ["bar bar.d/", "foo.d/"]

    @pytest.mark.complete("python3 -c foo shared/default/")
    def test_5(self, completion):
        # After '-c code', remaining words are script args: plain files too.
        assert completion == ["bar", "bar bar.d/", "foo", "foo.d/"]

    @pytest.mark.complete("python3 -c foo -")
    def test_6(self, completion):
        assert not completion

    @pytest.mark.complete("python3 -m foo -")
    def test_7(self, completion):
        assert not completion

    @pytest.mark.complete("python3 -m sy", require_cmd=True)
    def test_8(self, completion):
        # Module-name completion after -m.
        assert completion

    @pytest.mark.complete("python3 -m json.", require_cmd=True)
    def test_9(self, completion):
        # Submodule completion after a package prefix.
        assert "json.tool" in completion
|
yjmade/odoo | refs/heads/8.0 | addons/resource/tests/test_resource.py | 243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.resource.tests.common import TestResourceCommon
class TestResource(TestResourceCommon):
    def test_00_intervals(self):
        """Test the raw interval helpers of resource.calendar:
        cleaning (merge of overlapping intervals), leave removal, and
        hour scheduling (forward and backward)."""
        intervals = [
            (
                datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S')
            )
        ]
        # Test: interval cleaning
        cleaned_intervals = self.resource_calendar.interval_clean(intervals)
        self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning')
        # First interval: 03, unchanged
        self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Second interval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12
        self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Third interval: 04, 17-21, 18-19 being inside 17-21
        self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Test: disjoint removal (remove the leave intervals from the working interval)
        working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'))
        result = self.resource_calendar.interval_remove_leaves(working_interval, intervals)
        self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval')
        # Only remaining interval: 04, 14-17
        self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Test: schedule 5.5 hours forward over the cleaned intervals
        result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5)
        self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval: 03, 08-10 untouched (2 hours)
        self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval: 04, 08-11:30 (remaining 3.5 hours)
        self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Test: schedule 5.5 hours backwards over the reversed intervals
        cleaned_intervals.reverse()
        result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False)
        self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval: 04, 17-21 untouched (4 hours)
        self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval: 04, 12:30-14:00 (remaining 1.5 hours)
        self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
    def test_10_calendar_basics(self):
        """Test basic day-navigation methods of resource.calendar:
        get_next_day, get_previous_day, get_weekdays and
        get_attendances_for_weekdays (calendar works on day1/day4,
        i.e. weekdays 1 and 4 — see TestResourceCommon setup)."""
        cr, uid = self.cr, self.uid
        # --------------------------------------------------
        # Test1: get_next_day
        # --------------------------------------------------
        # Test: next day: next day after day1 is day4
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date())
        self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing')
        # Test: next day: next day after day4 is (day1+7)
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date())
        self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
        # Test: next day: next day after day4+1 is (day1+7)
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
        self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
        # Test: next day: next day after day1-1 is day1
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
        self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing')
        # --------------------------------------------------
        # Test2: get_previous_day
        # --------------------------------------------------
        # Test: previous day: previous day before day1 is (day4-7)
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date())
        self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
        # Test: previous day: previous day before day4 is day1
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date())
        self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing')
        # Test: previous day: previous day before day4+1 is day4
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
        self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong previous day computing')
        # Test: previous day: previous day before day1-1 is (day4-7)
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
        self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
        # --------------------------------------------------
        # Test3: misc (weekday extraction and attendance filtering)
        # --------------------------------------------------
        weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id)
        self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing')
        attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5])
        self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]),
                         'resource_calendar: wrong attendances filtering by weekdays computing')
    def test_20_calendar_working_intervals(self):
        """Test get_working_intervals_of_day of resource.calendar,
        with and without leave computation."""
        cr, uid = self.cr, self.uid
        _format = '%Y-%m-%d %H:%M:%S'
        # Test: day0 without leaves: 1 interval
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1)
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day3 without leaves: 2 intervals
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2)
        self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day0 with leaves outside range: 1 interval
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True)
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day0 with leaves: 2 intervals because of leave between 9 and 12, ending at 15:45:30
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id,
                                                                       start_dt=self.date1.replace(hour=8) + relativedelta(days=7),
                                                                       end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7),
                                                                       compute_leaves=True)
        self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals')
    def test_30_calendar_working_days(self):
        """Test calendar hours computation on a working day,
        with generic leaves and with resource-specific leaves."""
        cr, uid = self.cr, self.uid
        _format = '%Y-%m-%d %H:%M:%S'
        # Test: day1, beginning at 10:30 -> work from 10:30 (arrival) until 16:00
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: hour computation for same interval, should give 5.5
        wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
        self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing')
        # Test: day1+7 on leave, without leave computation
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7)
        )
        # Result: day1+7 (08->16), the leave being ignored
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: day1+7 on leave, with generic leave computation
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7),
            compute_leaves=True
        )
        # Result: day1+7 (08->09 + 12->16), the generic leave 09->12 removed
        self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: day1+14 on leave, with generic leave computation
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
            compute_leaves=True
        )
        # Result: day1+14 (08->16), only a resource-specific leave exists there
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: day1+14 on leave, with resource leave computation
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
            compute_leaves=True,
            resource_id=self.resource1_id
        )
        # Result: nothing, because on leave
        self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing')
def test_40_calendar_hours_scheduling(self):
    """ Testing calendar hours scheduling """
    cr, uid = self.cr, self.uid
    _format = '%Y-%m-%d %H:%M:%S'

    def check_intervals(intervals, expected):
        # *expected* holds (index, start, end) triples; start/end are
        # '%Y-%m-%d %H:%M:%S' strings compared against the scheduled slots.
        for index, begin, end in expected:
            self.assertEqual(intervals[index][0], datetime.strptime(begin, _format),
                             'resource_calendar: wrong hours scheduling')
            self.assertEqual(intervals[index][1], datetime.strptime(end, _format),
                             'resource_calendar: wrong hours scheduling')

    def check_total_hours(intervals, expected_hours):
        # The scheduled slots must add up exactly to the requested amount.
        total = timedelta()
        for item in intervals:
            total += item[1] - item[0]
        self.assertEqual(seconds(total) / 3600.0, expected_hours,
                         'resource_calendar: wrong hours scheduling')

    # --------------------------------------------------
    # Test0: schedule hours backwards (old interval_min_get)
    # Done without calendar, kept for reference:
    # res = self.resource_calendar.interval_min_get(cr, uid, None, self.date1, 40, resource=False)
    # res: (datetime.datetime(2013, 2, 7, 9, 8, 7), datetime.datetime(2013, 2, 12, 9, 8, 7))
    # --------------------------------------------------

    # --------------------------------------------------
    # Test1: schedule hours backwards (old interval_min_get)
    # --------------------------------------------------
    res = self.resource_calendar.schedule_hours(
        cr, uid, self.calendar_id, -40,
        day_dt=self.date1.replace(minute=0, second=0))
    check_intervals(res, [
        # current day, limited at 09:00 because of day_dt specified -> 1 hour
        (-1, '2013-02-12 08:00:00', '2013-02-12 09:00:00'),
        # previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours
        (-2, '2013-02-08 16:00:00', '2013-02-08 23:00:00'),
        (-3, '2013-02-08 08:00:00', '2013-02-08 13:00:00'),
        (-4, '2013-02-05 08:00:00', '2013-02-05 16:00:00'),
        (-5, '2013-02-01 16:00:00', '2013-02-01 23:00:00'),
        (-6, '2013-02-01 08:00:00', '2013-02-01 13:00:00'),
        # 7 hours remaining
        (-7, '2013-01-29 09:00:00', '2013-01-29 16:00:00'),
    ])
    check_total_hours(res, 40.0)

    # --------------------------------------------------
    # Test2: schedule hours forward (old interval_get)
    # --------------------------------------------------
    res = self.resource_calendar.schedule_hours(
        cr, uid, self.calendar_id, 40,
        day_dt=self.date1.replace(minute=0, second=0)
    )
    check_intervals(res, [
        (0, '2013-02-12 09:00:00', '2013-02-12 16:00:00'),
        (1, '2013-02-15 08:00:00', '2013-02-15 13:00:00'),
        (2, '2013-02-15 16:00:00', '2013-02-15 23:00:00'),
        (3, '2013-02-19 08:00:00', '2013-02-19 16:00:00'),
        (4, '2013-02-22 08:00:00', '2013-02-22 13:00:00'),
        (5, '2013-02-22 16:00:00', '2013-02-22 23:00:00'),
        (6, '2013-02-26 08:00:00', '2013-02-26 09:00:00'),
    ])
    check_total_hours(res, 40.0)

    # Same forward scheduling, now taking resource1's leaves into account
    res = self.resource_calendar.schedule_hours(
        cr, uid, self.calendar_id, 40,
        day_dt=self.date1.replace(minute=0, second=0),
        compute_leaves=True,
        resource_id=self.resource1_id
    )
    check_intervals(res, [
        (0, '2013-02-12 09:00:00', '2013-02-12 16:00:00'),
        (1, '2013-02-15 08:00:00', '2013-02-15 13:00:00'),
        (2, '2013-02-15 16:00:00', '2013-02-15 23:00:00'),
        (3, '2013-02-19 08:00:00', '2013-02-19 09:00:00'),
        (4, '2013-02-19 12:00:00', '2013-02-19 16:00:00'),
        (5, '2013-02-22 08:00:00', '2013-02-22 09:00:00'),
        (6, '2013-02-22 16:00:00', '2013-02-22 23:00:00'),
        (7, '2013-03-01 11:30:00', '2013-03-01 13:00:00'),
        (8, '2013-03-01 16:00:00', '2013-03-01 22:30:00'),
    ])
    check_total_hours(res, 40.0)

    # --------------------------------------------------
    # Test3: working hours (old _interval_hours_get)
    # --------------------------------------------------
    # old API: resource without leaves -> 2 weeks = 40 hours
    res = self.resource_calendar._interval_hours_get(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        resource_id=self.resource1_id, exclude_leaves=True)
    self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
    # new API: resource without leaves -> 2 weeks = 40 hours
    res = self.resource_calendar.get_working_hours(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        compute_leaves=False, resource_id=self.resource1_id)
    self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation')
    # old API: resource and leaves -> 40 hours - (3+4) leave hours
    res = self.resource_calendar._interval_hours_get(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        resource_id=self.resource1_id, exclude_leaves=False)
    self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
    # new API: resource and leaves -> 40 hours - (3+4) leave hours
    res = self.resource_calendar.get_working_hours(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        compute_leaves=True, resource_id=self.resource1_id)
    self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation')

    # --------------------------------------------------
    # Test4: misc
    # --------------------------------------------------
    # Test without calendar, falling back on default_interval
    res = self.resource_calendar.get_working_hours(
        cr, uid, None,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0),
        compute_leaves=True, resource_id=self.resource1_id,
        default_interval=(8, 16))
    self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation')
def test_50_calendar_schedule_days(self):
    """ Testing calendar days scheduling """
    cr, uid = self.cr, self.uid
    _format = '%Y-%m-%d %H:%M:%S'

    # --------------------------------------------------
    # Test1: with calendar
    # --------------------------------------------------
    result = self.resource_calendar.schedule_days_get_date(
        cr, uid, self.calendar_id, 5, day_date=self.date1)
    self.assertEqual(result.date(),
                     datetime.strptime('2013-02-26 00:0:00', _format).date(),
                     'resource_calendar: wrong days scheduling')
    # Scheduling again, this time skipping resource1's leaves
    result = self.resource_calendar.schedule_days_get_date(
        cr, uid, self.calendar_id, 5, day_date=self.date1,
        compute_leaves=True, resource_id=self.resource1_id)
    self.assertEqual(result.date(),
                     datetime.strptime('2013-03-01 00:0:00', _format).date(),
                     'resource_calendar: wrong days scheduling')

    # --------------------------------------------------
    # Test2: misc
    # --------------------------------------------------
    # Without calendar, should only count days -> 12 -> 16, 5 days with default intervals
    result = self.resource_calendar.schedule_days_get_date(
        cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16))
    self.assertEqual(result,
                     datetime.strptime('2013-02-16 16:00:00', _format),
                     'resource_calendar: wrong days scheduling')
def seconds(td):
    """Return the duration of *td* as a float number of seconds.

    :param td: a ``datetime.timedelta``
    :return: total seconds, including the fractional (microsecond) part

    The previous hand-rolled formula was an exact re-implementation of the
    standard library's ``timedelta.total_seconds()`` (Python >= 2.7); use
    the stdlib method instead.
    """
    assert isinstance(td, timedelta)
    return td.total_seconds()
|
snakeleon/YouCompleteMe-x64 | refs/heads/master | third_party/ycmd/ycmd/tests/python/subcommands_test.py | 2 | # Copyright (C) 2015-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
contains_inanyorder,
equal_to,
has_item,
has_entries,
has_entry,
matches_regexp )
from pprint import pformat
from unittest.mock import patch
import os
import pytest
import requests
from ycmd.utils import ReadFile
from ycmd.tests.python import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
CombineRequest,
ChunkMatcher,
LocationMatcher,
ErrorMatcher,
ExpectedFailure )
# Absolute path of jedi's bundled typeshed stub for builtins: GoTo on a
# builtin symbol is expected to resolve into this file (see the test data
# below that uses TYPESHED_PATH as the expected location).
# NOTE(review): the relative path mirrors the vendored jedi_deps layout —
# re-check it whenever the jedi submodule is upgraded.
TYPESHED_PATH = os.path.normpath(
PathToTestFile( '..', '..', '..', '..', 'third_party', 'jedi_deps', 'jedi',
'jedi', 'third_party', 'typeshed', 'stdlib', '2and3', 'builtins.pyi' ) )
class JediDef:
  """Bare-bones stand-in for a jedi definition object.

  Tests patch jedi's ``Script`` methods to return instances of this class
  so they can exercise ycmd's handling of incomplete/invalid definitions.
  """

  def __init__( self, col = None, line = None, path = None ):
    # Only the attributes the tests' patched call sites expose are set here.
    self.column, self.line, self.module_path = col, line, path
    self.description = ''
def RunTest( app, test ):
"""Send the /run_completer_command request described by *test* and check
both the HTTP status code and the JSON body against test[ 'expect' ].

test[ 'request' ] must provide 'filepath' and 'command' (plus optional
'arguments'); the file contents are read from disk and merged in.
"""
contents = ReadFile( test[ 'request' ][ 'filepath' ] )
# We ignore errors here and check the response code ourself.
# This is to allow testing of requests returning errors.
response = app.post_json(
'/run_completer_command',
CombineRequest( test[ 'request' ], {
'contents': contents,
'filetype': 'python',
'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
+ test[ 'request' ].get( 'arguments', [] ) )
} ),
expect_errors = True
)
# Dump the raw response to make failures easier to debug.
print( f'completer response: { pformat( response.json ) }' )
assert_that( response.status_code,
equal_to( test[ 'expect' ][ 'response' ] ) )
assert_that( response.json, test[ 'expect' ][ 'data' ] )
def Subcommands_GoTo( app, test, command ):
"""Run a GoTo-family subcommand and verify where it jumps.

test[ 'request' ] is ( filename, line, column[, arguments] ), with the
filename relative to the 'goto' test directory. test[ 'response' ] is
either a list of ( filename, line, column[, extras] ) locations (matched
in any order), a single such tuple, or the expected error-message string.
"""
# A list response: several locations are acceptable, order-independent.
if isinstance( test[ 'response' ], list ):
expect = {
'response': requests.codes.ok,
'data': contains_inanyorder( *[
LocationMatcher(
PathToTestFile( 'goto', r[ 0 ] ),
r[ 1 ],
r[ 2 ],
**( {} if len( r ) < 4 else r[ 3 ] ) )
for r in test[ 'response' ]
] )
}
# A tuple response: exactly one expected location.
elif isinstance( test[ 'response' ], tuple ):
expect = {
'response': requests.codes.ok,
'data': LocationMatcher( PathToTestFile( 'goto',
test[ 'response' ][ 0 ] ),
test[ 'response' ][ 1 ],
test[ 'response' ][ 2 ],
**( {} if len( test[ 'response' ] ) < 4
else test[ 'response' ][ 3 ] ) )
}
# Anything else is the message of the RuntimeError the command raises.
else:
expect = {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError, test[ 'response' ] )
}
req = test[ 'request' ]
RunTest( app, {
'description': command + ' jumps to the right location',
'request': {
'command' : command,
'arguments': [] if len( req ) < 4 else req[ 3 ],
'filetype' : 'python',
'filepath' : PathToTestFile( 'goto', req[ 0 ] ),
'line_num' : req[ 1 ],
'column_num': req[ 2 ]
},
'expect': expect,
} )
@pytest.mark.parametrize( 'cmd', [ 'GoTo',
'GoToDefinition',
'GoToDeclaration' ] )
@pytest.mark.parametrize( 'test', [
# Nothing
{ 'request': ( 'basic.py', 3, 5 ), 'response': 'Can\'t jump to '
'definition.' },
# Keyword
{ 'request': ( 'basic.py', 4, 3 ), 'response': 'Can\'t jump to '
'definition.' },
# Builtin
{ 'request': ( 'basic.py', 1, 4 ), 'response': ( 'basic.py', 1, 1 ) },
{ 'request': ( 'basic.py', 1, 12 ), 'response': ( TYPESHED_PATH, 947, 7 ) },
{ 'request': ( 'basic.py', 2, 2 ), 'response': ( 'basic.py', 1, 1 ) },
# Class
{ 'request': ( 'basic.py', 4, 7 ), 'response': ( 'basic.py', 4, 7 ) },
{ 'request': ( 'basic.py', 4, 11 ), 'response': ( 'basic.py', 4, 7 ) },
{ 'request': ( 'basic.py', 7, 19 ), 'response': ( 'basic.py', 4, 7 ) },
# Instance
{ 'request': ( 'basic.py', 7, 1 ), 'response': ( 'basic.py', 7, 1 ) },
{ 'request': ( 'basic.py', 7, 11 ), 'response': ( 'basic.py', 7, 1 ) },
{ 'request': ( 'basic.py', 8, 23 ), 'response': ( 'basic.py', 7, 1 ) },
# Instance reference
{ 'request': ( 'basic.py', 8, 1 ), 'response': ( 'basic.py', 8, 1 ) },
{ 'request': ( 'basic.py', 8, 5 ), 'response': ( 'basic.py', 8, 1 ) },
{ 'request': ( 'basic.py', 9, 12 ), 'response': ( 'basic.py', 8, 1 ) },
# Member access
{ 'request': ( 'child.py', 4, 12 ),
'response': ( 'parent.py', 2, 7 ) },
# Builtin from different file
{ 'request': ( 'multifile1.py', 2, 30 ),
'response': ( 'multifile2.py', 1, 24 ) },
{ 'request': ( 'multifile1.py', 4, 5 ),
'response': ( 'multifile1.py', 2, 24 ) },
# Function from different file
{ 'request': ( 'multifile1.py', 1, 24 ),
'response': ( 'multifile3.py', 3, 5 ) },
{ 'request': ( 'multifile1.py', 5, 4 ),
'response': ( 'multifile1.py', 1, 24 ) },
# Alias from different file
{ 'request': ( 'multifile1.py', 2, 47 ),
'response': ( 'multifile2.py', 1, 51 ) },
{ 'request': ( 'multifile1.py', 6, 14 ),
'response': ( 'multifile1.py', 2, 36 ) },
# Absolute import from nested module
{ 'request': ( os.path.join( 'nested_import', 'importer.py' ), 1, 19 ),
'response': ( 'basic.py', 4, 7 ) },
{ 'request': ( os.path.join( 'nested_import', 'importer.py' ), 2, 40 ),
'response': ( os.path.join( 'nested_import', 'to_import.py' ), 1, 5 ) },
# Relative within nested module
{ 'request': ( os.path.join( 'nested_import', 'importer.py' ), 3, 28 ),
'response': ( os.path.join( 'nested_import', 'to_import.py' ), 4, 5 ) },
] )
@SharedYcmd
def Subcommands_GoTo_test( app, cmd, test ):
Subcommands_GoTo( app, test, cmd )
@pytest.mark.parametrize( 'test', [
{ 'request': ( 'basic.py', 1, 1, [ 'MyClass' ] ),
'response': ( 'basic.py', 4, 7 ) },
{ 'request': ( 'basic.py', 1, 1, [ 'class C' ] ),
'response': ( 'child.py', 2, 7, { 'description': 'class C' } ) },
{ 'request': ( 'basic.py', 1, 1, [ 'C.c' ] ),
'response': [
( 'child.py', 3, 7, { 'description': 'def c' } ),
( 'parent.py', 3, 7, { 'description': 'def c' } )
] },
{ 'request': ( 'basic.py', 1, 1, [ 'nothing_here_mate' ] ),
'response': 'Symbol not found' }
] )
@SharedYcmd
def Subcommands_GoToSymbol_test( app, test ):
Subcommands_GoTo( app, test, 'GoToSymbol' )
@pytest.mark.parametrize( 'test', [
{ 'request': ( 'basic.py', 1, 4 ),
'response': 'Can\'t jump to definition.', 'cmd': 'GoTo' },
{ 'request': ( 'basic.py', 1, 4 ),
'response': 'Can\'t find references.', 'cmd': 'GoToReferences' },
{ 'request': ( 'basic.py', 1, 4 ),
'response': 'Can\'t jump to type definition.', 'cmd': 'GoToType' }
] )
@SharedYcmd
def Subcommands_GoTo_SingleInvalidJediDefinition_test( app, test ):
with patch( 'ycmd.completers.python.python_completer.jedi.Script.infer',
return_value = [ JediDef() ] ):
with patch( 'ycmd.completers.python.python_completer.jedi.Script.goto',
return_value = [ JediDef() ] ):
with patch( 'ycmd.completers.python.python_completer.'
'jedi.Script.get_references',
return_value = [ JediDef() ] ):
Subcommands_GoTo( app, test, test.pop( 'cmd' ) )
def Subcommands_GetType( app, position, expected_message ):
  """Run GetType at *position* (line, column) in GetType.py and assert the
  completer reports *expected_message* as the type."""
  test_path = PathToTestFile( 'GetType.py' )
  line, column = position
  request = BuildRequest( filepath = test_path,
                          filetype = 'python',
                          line_num = line,
                          column_num = column,
                          contents = ReadFile( test_path ),
                          command_arguments = [ 'GetType' ] )
  response = app.post_json( '/run_completer_command', request ).json
  assert_that( response, has_entry( 'message', expected_message ) )
@pytest.mark.parametrize( 'position,expected_message', [
( ( 11, 7 ), 'instance int' ),
( ( 11, 20 ), 'def some_function()' ),
( ( 12, 15 ), 'class SomeClass(*args, **kwargs)' ),
( ( 13, 8 ), 'instance SomeClass' ),
( ( 13, 17 ), 'def SomeMethod(first_param, second_param)' ),
( ( 19, 4 ), matches_regexp( '^(instance str, instance int|'
'instance int, instance str)$' ) )
] )
@SharedYcmd
def Subcommands_GetType_test( app, position, expected_message ):
Subcommands_GetType( app, position, expected_message )
@SharedYcmd
def Subcommands_GetType_NoTypeInformation_test( app ):
"""GetType on a position with nothing inferable must produce a
RuntimeError response rather than an empty message."""
filepath = PathToTestFile( 'GetType.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 6,
column_num = 3,
contents = contents,
command_arguments = [ 'GetType' ] )
# expect_errors lets the test inspect the error payload instead of
# webtest raising on the non-2xx status.
response = app.post_json( '/run_completer_command',
command_data,
expect_errors = True ).json
assert_that( response,
ErrorMatcher( RuntimeError, 'No type information available.' ) )
@SharedYcmd
def Subcommands_GetDoc_Method_test( app ):
# Testcase1
filepath = PathToTestFile( 'GetDoc.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 17,
column_num = 9,
contents = contents,
command_arguments = [ 'GetDoc' ] )
assert_that(
app.post_json( '/run_completer_command', command_data ).json,
has_entry( 'detailed_info', '_ModuleMethod()\n\n'
'Module method docs\n'
'Are dedented, like you might expect' )
)
@SharedYcmd
def Subcommands_GetDoc_Class_test( app ):
filepath = PathToTestFile( 'GetDoc.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 19,
column_num = 6,
contents = contents,
command_arguments = [ 'GetDoc' ] )
response = app.post_json( '/run_completer_command', command_data ).json
assert_that( response, has_entry(
'detailed_info', 'TestClass()\n\nClass Documentation',
) )
@SharedYcmd
def Subcommands_GetDoc_WhitespaceOnly_test( app ):
filepath = PathToTestFile( 'GetDoc.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 27,
column_num = 10,
contents = contents,
command_arguments = [ 'GetDoc' ] )
response = app.post_json( '/run_completer_command',
command_data,
expect_errors = True ).json
assert_that( response,
ErrorMatcher( RuntimeError, 'No documentation available.' ) )
@SharedYcmd
def Subcommands_GetDoc_NoDocumentation_test( app ):
filepath = PathToTestFile( 'GetDoc.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 8,
column_num = 23,
contents = contents,
command_arguments = [ 'GetDoc' ] )
response = app.post_json( '/run_completer_command',
command_data,
expect_errors = True ).json
assert_that( response,
ErrorMatcher( RuntimeError, 'No documentation available.' ) )
# Each case maps a cursor position in goto/basic.py to the expected
# GoToType target: a ( file, line, column ) location — including the
# typeshed builtins stub for builtin types — or the error message raised.
@pytest.mark.parametrize( 'test', [
{ 'request': ( 'basic.py', 2, 1 ), 'response': ( TYPESHED_PATH, 947, 7 ) },
{ 'request': ( 'basic.py', 8, 1 ), 'response': ( 'basic.py', 4, 7 ) },
{ 'request': ( 'basic.py', 3, 1 ),
'response': 'Can\'t jump to type definition.' },
] )
@SharedYcmd
def Subcommands_GoToType_test( app, test ):
Subcommands_GoTo( app, test, 'GoToType' )
@SharedYcmd
def Subcommands_GoToReferences_Function_test( app ):
filepath = PathToTestFile( 'goto', 'references.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 4,
column_num = 5,
contents = contents,
command_arguments = [ 'GoToReferences' ] )
assert_that(
app.post_json( '/run_completer_command', command_data ).json,
contains_exactly(
has_entries( {
'filepath': filepath,
'line_num': 1,
'column_num': 5,
'description': 'def f'
} ),
has_entries( {
'filepath': filepath,
'line_num': 4,
'column_num': 5,
'description': 'f'
} ),
has_entries( {
'filepath': filepath,
'line_num': 5,
'column_num': 5,
'description': 'f'
} ),
has_entries( {
'filepath': filepath,
'line_num': 6,
'column_num': 5,
'description': 'f'
} )
)
)
@SharedYcmd
def Subcommands_GoToReferences_Builtin_test( app ):
filepath = PathToTestFile( 'goto', 'references.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 8,
column_num = 1,
contents = contents,
command_arguments = [ 'GoToReferences' ] )
assert_that(
app.post_json( '/run_completer_command', command_data ).json,
has_item(
has_entries( {
'filepath': filepath,
'line_num': 8,
'column_num': 1,
'description': 'str'
} )
)
)
@SharedYcmd
def Subcommands_GoToReferences_NoReferences_test( app ):
"""GoToReferences on a symbol with no references must produce a
RuntimeError response with the 'Can't find references.' message."""
filepath = PathToTestFile( 'goto', 'references.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 2,
column_num = 5,
contents = contents,
command_arguments = [ 'GoToReferences' ] )
# expect_errors: inspect the error payload rather than letting webtest
# raise on the non-2xx status.
response = app.post_json( '/run_completer_command',
command_data,
expect_errors = True ).json
assert_that( response,
ErrorMatcher( RuntimeError, 'Can\'t find references.' ) )
@SharedYcmd
def Subcommands_GoToReferences_InvalidJediReferences_test( app ):
with patch( 'ycmd.completers.python.python_completer.'
'jedi.Script.get_references',
return_value = [ JediDef(),
JediDef( 1, 1, PathToTestFile( 'foo.py' ) ) ] ):
filepath = PathToTestFile( 'goto', 'references.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 2,
column_num = 5,
contents = contents,
command_arguments = [ 'GoToReferences' ] )
response = app.post_json( '/run_completer_command',
command_data,
expect_errors = True ).json
assert_that( response, contains_exactly( has_entries( {
'line_num': 1,
'column_num': 2, # Jedi columns are 0 based
'filepath': PathToTestFile( 'foo.py' ) } ) ) )
@SharedYcmd
def Subcommands_RefactorRename_NoNewName_test( app ):
"""RefactorRename without a new-name argument must be rejected with an
internal-server-error status and a RuntimeError payload."""
filepath = PathToTestFile( 'basic.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 3,
column_num = 10,
contents = contents,
command_arguments = [ 'RefactorRename' ] )
response = app.post_json( '/run_completer_command',
command_data,
expect_errors = True )
# Both the status code and the error body are checked.
assert_that( response.status_code,
equal_to( requests.codes.internal_server_error ) )
assert_that( response.json,
ErrorMatcher( RuntimeError, 'Must specify a new name' ) )
@SharedYcmd
def Subcommands_RefactorRename_Same_test( app ):
filepath = PathToTestFile( 'basic.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 3,
column_num = 10,
contents = contents,
command_arguments = [ 'RefactorRename',
'c' ] )
response = app.post_json( '/run_completer_command',
command_data ).json
assert_that( response, has_entries( {
'fixits': contains_exactly(
has_entries( {
'text': '',
'chunks': contains_exactly(
ChunkMatcher( 'c',
LocationMatcher( filepath, 3, 10 ),
LocationMatcher( filepath, 3, 11 ) ),
ChunkMatcher( 'c',
LocationMatcher( filepath, 7, 3 ),
LocationMatcher( filepath, 7, 4 ) )
)
} )
)
} ) )
@SharedYcmd
def Subcommands_RefactorRename_Longer_test( app ):
filepath = PathToTestFile( 'basic.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 3,
column_num = 10,
contents = contents,
command_arguments = [ 'RefactorRename',
'booo' ] )
response = app.post_json( '/run_completer_command',
command_data ).json
assert_that( response, has_entries( {
'fixits': contains_exactly(
has_entries( {
'text': '',
'chunks': contains_exactly(
ChunkMatcher( 'booo',
LocationMatcher( filepath, 3, 10 ),
LocationMatcher( filepath, 3, 11 ) ),
ChunkMatcher( 'booo',
LocationMatcher( filepath, 7, 3 ),
LocationMatcher( filepath, 7, 4 ) )
)
} )
)
} ) )
@SharedYcmd
def Subcommands_RefactorRename_ShortenDelete_test( app ):
filepath = PathToTestFile( 'basic.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 1,
column_num = 8,
contents = contents,
command_arguments = [ 'RefactorRename',
'F' ] )
response = app.post_json( '/run_completer_command',
command_data ).json
assert_that( response, has_entries( {
'fixits': contains_exactly(
has_entries( {
'text': '',
'chunks': contains_exactly(
ChunkMatcher( '',
LocationMatcher( filepath, 1, 8 ),
LocationMatcher( filepath, 1, 10 ) ),
ChunkMatcher( '',
LocationMatcher( filepath, 6, 6 ),
LocationMatcher( filepath, 6, 8 ) )
)
} )
)
} ) )
@SharedYcmd
def Subcommands_RefactorRename_Shorten_test( app ):
filepath = PathToTestFile( 'basic.py' )
contents = ReadFile( filepath )
command_data = BuildRequest( filepath = filepath,
filetype = 'python',
line_num = 1,
column_num = 8,
contents = contents,
command_arguments = [ 'RefactorRename',
'G' ] )
response = app.post_json( '/run_completer_command',
command_data ).json
assert_that( response, has_entries( {
'fixits': contains_exactly(
has_entries( {
'text': '',
'chunks': contains_exactly(
ChunkMatcher( 'G',
LocationMatcher( filepath, 1, 7 ),
LocationMatcher( filepath, 1, 10 ) ),
ChunkMatcher( 'G',
LocationMatcher( filepath, 6, 5 ),
LocationMatcher( filepath, 6, 8 ) )
)
} )
)
} ) )
@SharedYcmd
def Subcommands_RefactorRename_StartOfFile_test( app ):
one = PathToTestFile( 'rename', 'one.py' )
contents = ReadFile( one )
command_data = BuildRequest( filepath = one,
filetype = 'python',
line_num = 8,
column_num = 44,
contents = contents,
command_arguments = [ 'RefactorRename',
'myvariable' ] )
response = app.post_json( '/run_completer_command',
command_data ).json
assert_that( response, has_entries( {
'fixits': contains_exactly(
has_entries( {
'text': '',
'chunks': contains_exactly(
ChunkMatcher( 'myvariable',
LocationMatcher( one, 1, 1 ),
LocationMatcher( one, 1, 13 ) ),
ChunkMatcher( 'myvariable',
LocationMatcher( one, 8, 33 ),
LocationMatcher( one, 8, 45 ) ),
ChunkMatcher( 'myvariable',
LocationMatcher( one, 16, 32 ),
LocationMatcher( one, 16, 44 ) )
)
} )
)
} ) )
@SharedYcmd
def Subcommands_RefactorRename_MultiFIle_test( app ):
one = PathToTestFile( 'rename', 'one.py' )
two = PathToTestFile( 'rename', 'two.py' )
contents = ReadFile( one )
command_data = BuildRequest( filepath = one,
filetype = 'python',
line_num = 4,
column_num = 7,
contents = contents,
command_arguments = [ 'RefactorRename',
'OneLove' ] )
response = app.post_json( '/run_completer_command',
command_data ).json
assert_that( response, has_entries( {
'fixits': contains_exactly(
has_entries( {
'text': '',
'chunks': contains_exactly(
ChunkMatcher( 'eLov',
LocationMatcher( one, 4, 9 ),
LocationMatcher( one, 4, 9 ) ),
ChunkMatcher( 'eLov',
LocationMatcher( one, 9, 24 ),
LocationMatcher( one, 9, 24 ) ),
ChunkMatcher( 'Love',
LocationMatcher( one, 16, 15 ),
LocationMatcher( one, 16, 15 ) ),
ChunkMatcher( 'eLov',
LocationMatcher( two, 4, 18 ),
LocationMatcher( two, 4, 18 ) ),
ChunkMatcher( 'Love',
LocationMatcher( two, 11, 14 ),
LocationMatcher( two, 11, 14 ) )
)
} )
)
} ) )
@ExpectedFailure( 'file renames not implemented yet' )
@SharedYcmd
def Subcommands_RefactorRename_Module_test( app ):
  """RefactorRename applied to a module name (import of one.py from two.py).
  Expects both the in-file edits and a RENAME file operation turning
  one.py into pfivr.py. Marked @ExpectedFailure because file renames are
  not implemented yet — the test documents the intended behavior.
  """
  one = PathToTestFile( 'rename', 'one.py' )
  two = PathToTestFile( 'rename', 'two.py' )
  contents = ReadFile( two )
  command_data = BuildRequest( filepath = two,
                               filetype = 'python',
                               line_num = 1,
                               column_num = 8,
                               contents = contents,
                               command_arguments = [ 'RefactorRename',
                                                     'pfivr' ] )
  response = app.post_json( '/run_completer_command',
                            command_data ).json
  assert_that( response, has_entries( {
    'fixits': contains_exactly(
      has_entries( {
        'text': '',
        'chunks': contains_exactly(
          ChunkMatcher( 'pfivr',
                        LocationMatcher( two, 1, 8 ),
                        LocationMatcher( two, 1, 11 ) ),
          ChunkMatcher( 'pfivr',
                        LocationMatcher( two, 4, 12 ),
                        LocationMatcher( two, 4, 15 ) )
        ),
        'files': contains_exactly(
          has_entries( {
            'operation': 'RENAME',
            'old_file': one,
            'new_file': PathToTestFile( 'rename', 'pfivr.py' )
          } )
        )
      } )
    )
  } ) )
def Dummy_test():
  """No-op passing test; see the linked issue for why it must exist."""
  # Workaround for https://github.com/pytest-dev/pytest-rerunfailures/issues/51
  assert True
|
c4fcm/DataBasic | refs/heads/master | databasic/mail.py | 1 | import logging
from flask_mail import Message
from databasic import app, mail
logger = logging.getLogger(__name__)
DEFAULT_SENDER = app.config.get('MAIL_USERNAME')
def send_email(sender, recipients, subject, message):
    """Send a plain-text email through the app's Flask-Mail instance.

    Args:
        sender: "From" address for the message.
        recipients: list of recipient addresses.
        subject: subject line.
        message: plain-text body.
    """
    # Lazy %-style args: the log string is only built when DEBUG is enabled,
    # and non-str sender/subject values can no longer raise a TypeError the
    # way the previous '+'-concatenation could.
    logger.debug('Sending mail %s:%s', sender, subject)
    msg = Message(subject,
                  sender=sender,
                  recipients=recipients)
    msg.body = message
    mail.send(msg)
'''
def send_html_email(subject, recipients, text_body, html_body):
msg = Message(subject, sender=DEFAULT_SENDER, recipients=recipients)
msg.body = text_body
msg.html = html_body
mail.send(msg)
'''
|
ingokegel/intellij-community | refs/heads/master | python/testData/intentions/googleNoReturnSectionForInit.py | 82 | class C:
def __i<caret>nit__(self, x, y):
return None |
neumerance/cloudloon2 | refs/heads/master | .venv/lib/python2.7/site-packages/heatclient/tests/__init__.py | 12133432 | |
globocom/database-as-a-service | refs/heads/master | dbaas/workflow/steps/redis/__init__.py | 12133432 | |
chand3040/cloud_that | refs/heads/named-release/cypress.rc | lms/djangoapps/certificates/management/commands/__init__.py | 12133432 | |
okfish/django-oscar | refs/heads/master | tests/integration/customer/__init__.py | 12133432 | |
rmehta/frappe | refs/heads/develop | frappe/core/doctype/language/__init__.py | 12133432 | |
gengue/django | refs/heads/master | tests/gis_tests/rasterapp/__init__.py | 12133432 | |
stonebig/bokeh | refs/heads/master | bokeh/sampledata/tests/test_us_counties.py | 2 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.us_counties as bsu
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.us_counties", ALL))
@pytest.mark.sampledata
def test_data():
import bokeh.sampledata.us_counties as bsu
assert isinstance(bsu.data, dict)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
Kongsea/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/ops/bijectors/invert.py | 30 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Invert bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector as bijector_lib
__all__ = [
"Invert",
]
class Invert(bijector_lib.Bijector):
  """Bijector which inverts another Bijector.
  Example Use: [ExpGammaDistribution (see Background & Context)](
  https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
  models `Y=log(X)` where `X ~ Gamma`.
  ```python
  exp_gamma_distribution = TransformedDistribution(
    distribution=Gamma(concentration=1., rate=2.),
    bijector=bijector.Invert(bijector.Exp())
  ```
  """
  def __init__(self, bijector, validate_args=False, name=None):
    """Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.
    Note: An inverted bijector's `inverse_log_det_jacobian` is often more
    efficient if the base bijector implements `_forward_log_det_jacobian`. If
    `_forward_log_det_jacobian` is not implemented then the following code is
    used:
    ```python
    y = self.inverse(x, **kwargs)
    return -self.inverse_log_det_jacobian(y, **kwargs)
    ```
    Args:
      bijector: Bijector instance.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.
    """
    # Inverting a non-injective bijector is ambiguous (forward would have to
    # choose among multiple preimages), so it is rejected outright.
    if not bijector._is_injective:  # pylint: disable=protected-access
      raise NotImplementedError(
          "Invert is not implemented for non-injective bijectors.")
    self._bijector = bijector
    # Jacobian constancy, dtype and graph parents are inherited unchanged
    # from the wrapped bijector; only the transform direction is swapped.
    super(Invert, self).__init__(
        event_ndims=bijector.event_ndims,
        graph_parents=bijector.graph_parents,
        is_constant_jacobian=bijector.is_constant_jacobian,
        validate_args=validate_args,
        dtype=bijector.dtype,
        name=name or "_".join(["invert", bijector.name]))
  # All queries below delegate to the wrapped bijector with forward and
  # inverse swapped, mirroring the swapped transform direction.
  def _forward_event_shape(self, input_shape):
    return self.bijector._inverse_event_shape(input_shape)  # pylint: disable=protected-access
  def _forward_event_shape_tensor(self, input_shape):
    return self.bijector._inverse_event_shape_tensor(input_shape)  # pylint: disable=protected-access
  def _inverse_event_shape(self, output_shape):
    return self.bijector._forward_event_shape(output_shape)  # pylint: disable=protected-access
  def _inverse_event_shape_tensor(self, output_shape):
    return self.bijector._forward_event_shape_tensor(output_shape)  # pylint: disable=protected-access
  @property
  def bijector(self):
    """The wrapped `Bijector` whose directions are being swapped."""
    return self._bijector
  def _forward(self, x, **kwargs):
    return self.bijector._inverse(x, **kwargs)  # pylint: disable=protected-access
  def _inverse(self, y, **kwargs):
    return self.bijector._forward(y, **kwargs)  # pylint: disable=protected-access
  def _inverse_log_det_jacobian(self, y, **kwargs):
    return self.bijector._forward_log_det_jacobian(y, **kwargs)  # pylint: disable=protected-access
  def _forward_log_det_jacobian(self, x, **kwargs):
    return self.bijector._inverse_log_det_jacobian(x, **kwargs)  # pylint: disable=protected-access
|
tbentropy/tilecutter | refs/heads/master | old/v.0.3/setup.py | 1 | from distutils.core import setup
import py2exe
# py2exe build configuration: produce a windowed (no-console) Windows
# executable from tilecutter.py, embedding tilecutter.ico as icon resource 1.
setup(
    windows = [
        {
            "script": "tilecutter.py",
            "icon_resources": [(1, "tilecutter.ico")]
        }
    ],
)
|
alisidd/tensorflow | refs/heads/asgd-dc | tensorflow/python/kernel_tests/weights_broadcast_test.py | 130 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import test
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape), dtype=np.int32), newshape=shape)
class AssertBroadcastableTest(test.TestCase):
  """Tests for `weights_broadcast_ops.assert_broadcastable`.
  Valid cases exercise every mix of broadcast (size-1) and full dimensions
  against (3, 2, 4) values; invalid cases must be rejected statically with
  a ValueError and dynamically (placeholder-fed) with an OpError.
  """
  def setUp(self):
    ops.reset_default_graph()
  def _test_valid(self, weights, values):
    """Assert the op accepts `weights` on both static and dynamic paths."""
    # Static path: shapes are fully known at graph-construction time.
    static_op = weights_broadcast_ops.assert_broadcastable(
        weights=weights, values=values)
    # Dynamic path: shapes only become known at run time via placeholders.
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.assert_broadcastable(
        weights=weights_placeholder, values=values_placeholder)
    with self.test_session():
      static_op.run()
      dynamic_op.run(feed_dict={
          weights_placeholder: weights,
          values_placeholder: values,
      })
  def testScalar(self):
    self._test_valid(weights=5, values=_test_values((3, 2, 4)))
  def test1x1x1(self):
    self._test_valid(
        weights=np.asarray((5,)).reshape((1, 1, 1)),
        values=_test_values((3, 2, 4)))
  def test1x1xN(self):
    self._test_valid(
        weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
        values=_test_values((3, 2, 4)))
  def test1xNx1(self):
    self._test_valid(
        weights=np.asarray((5, 11)).reshape((1, 2, 1)),
        values=_test_values((3, 2, 4)))
  def test1xNxN(self):
    self._test_valid(
        weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)),
        values=_test_values((3, 2, 4)))
  def testNx1x1(self):
    self._test_valid(
        weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)),
        values=_test_values((3, 2, 4)))
  def testNx1xN(self):
    self._test_valid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)),
        values=_test_values((3, 2, 4)))
  def testNxNxN(self):
    self._test_valid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)),
        values=_test_values((3, 2, 4)))
  def _test_invalid(self, weights, values):
    """Assert the op rejects `weights` both statically and dynamically."""
    error_msg = 'weights can not be broadcast to values'
    with self.assertRaisesRegexp(ValueError, error_msg):
      weights_broadcast_ops.assert_broadcastable(weights=weights, values=values)
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.assert_broadcastable(
        weights=weights_placeholder, values=values_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
        dynamic_op.run(feed_dict={
            weights_placeholder: weights,
            values_placeholder: values,
        })
  def testInvalid1(self):
    self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
  def testInvalid1x1(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1)),
        values=_test_values((3, 2, 4)))
  def testInvalidPrefixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
        values=_test_values((3, 2, 4)))
  def testInvalidSuffixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
        values=_test_values((3, 2, 4)))
  def testInvalidOnesExtraDim(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
        values=_test_values((3, 2, 4)))
  def testInvalidPrefixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
        values=_test_values((3, 2, 4)))
  def testInvalidSuffixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
        values=_test_values((3, 2, 4)))
class BroadcastWeightsTest(test.TestCase):
  """Tests for `weights_broadcast_ops.broadcast_weights`.
  Valid cases check the broadcast result equals an explicit np.tile of the
  weights; invalid cases mirror AssertBroadcastableTest's rejections.
  """
  def setUp(self):
    ops.reset_default_graph()
  def _test_valid(self, weights, values, expected):
    """Assert broadcasting yields `expected` on static and dynamic paths."""
    static_op = weights_broadcast_ops.broadcast_weights(
        weights=weights, values=values)
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.broadcast_weights(
        weights=weights_placeholder, values=values_placeholder)
    with self.test_session():
      self.assertAllEqual(expected, static_op.eval())
      self.assertAllEqual(expected, dynamic_op.eval(feed_dict={
          weights_placeholder: weights,
          values_placeholder: values,
      }))
  def testScalar(self):
    self._test_valid(
        weights=5,
        values=_test_values((3, 2, 4)),
        expected=5 * np.ones((3, 2, 4)))
  def test1x1x1(self):
    self._test_valid(
        weights=np.asarray((5,)).reshape((1, 1, 1)),
        values=_test_values((3, 2, 4)),
        expected=5 * np.ones((3, 2, 4)))
  def test1x1xN(self):
    weights = np.asarray((5, 7, 11, 3)).reshape((1, 1, 4))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(3, 2, 1)))
  def test1xNx1(self):
    weights = np.asarray((5, 11)).reshape((1, 2, 1))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(3, 1, 4)))
  def test1xNxN(self):
    weights = np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(3, 1, 1)))
  def testNx1x1(self):
    weights = np.asarray((5, 7, 11)).reshape((3, 1, 1))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(1, 2, 4)))
  def testNx1xN(self):
    weights = np.asarray((
        5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(1, 2, 1)))
  def testNxNxN(self):
    weights = np.asarray((
        5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
        2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4))
    self._test_valid(
        weights=weights, values=_test_values((3, 2, 4)), expected=weights)
  def _test_invalid(self, weights, values):
    """Assert broadcasting is rejected statically and dynamically."""
    error_msg = 'weights can not be broadcast to values'
    with self.assertRaisesRegexp(ValueError, error_msg):
      weights_broadcast_ops.broadcast_weights(weights=weights, values=values)
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.broadcast_weights(
        weights=weights_placeholder, values=values_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
        dynamic_op.eval(feed_dict={
            weights_placeholder: weights,
            values_placeholder: values,
        })
  def testInvalid1(self):
    self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
  def testInvalid1x1(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1)),
        values=_test_values((3, 2, 4)))
  def testInvalidPrefixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
        values=_test_values((3, 2, 4)))
  def testInvalidSuffixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
        values=_test_values((3, 2, 4)))
  def testInvalidOnesExtraDim(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
        values=_test_values((3, 2, 4)))
  def testInvalidPrefixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
        values=_test_values((3, 2, 4)))
  def testInvalidSuffixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
        values=_test_values((3, 2, 4)))
if __name__ == '__main__':
test.main()
|
YukinoHayakawa/mtasa-blue | refs/heads/master | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-clang-cxx-language-standard.py | 264 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that CLANG_CXX_LANGUAGE_STANDARD works.
"""
import TestGyp
import sys
# CLANG_CXX_LANGUAGE_STANDARD is an Xcode/clang-specific setting, so the test
# is only meaningful on macOS; on other platforms the script exits silently.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode'])
  test.run_gyp('clang-cxx-language-standard.gyp',
               chdir='clang-cxx-language-standard')
  test.build('clang-cxx-language-standard.gyp', test.ALL,
             chdir='clang-cxx-language-standard')
  test.pass_test()  # prints PASSED and exits 0
|
epssy/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/utils/http.py | 35 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlparse,
urlencode as original_urlencode)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned string
    can safely be used as part of an argument to a subsequent iri_to_uri() call
    without double-quoting occurring.
    """
    return force_text(quote(force_str(url), force_str(safe)))
# Wrapped so it can be applied to lazy strings (evaluated only on access).
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(quote_plus(force_str(url), force_str(safe)))
# Wrapped so it can be applied to lazy strings (evaluated only on access).
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    return force_text(unquote(force_str(quoted_url)))
# Wrapped so it can be applied to lazy strings (evaluated only on access).
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(unquote_plus(force_str(quoted_url)))
# Wrapped so it can be applied to lazy strings (evaluated only on access).
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
    then encoded as per normal.
    """
    if isinstance(query, MultiValueDict):
        # lists() preserves every value of a multi-valued key, which plain
        # items() would collapse to the last value only.
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    return original_urlencode(
        [(force_str(k),
         [force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
            for k, v in query],
        doseq)
def cookie_date(epoch_seconds=None):
    """
    Format a timestamp per Netscape's cookie standard.

    `epoch_seconds` is a float of seconds since the epoch in UTC (as
    returned by time.time()); None means "now". Returns a string shaped
    like 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    stamp = formatdate(epoch_seconds)
    # RFC-2822 output is 'Wdy, DD Mon YYYY HH:MM:SS -0000'; splice in the
    # dashes Netscape expects and replace the numeric zone with 'GMT'.
    return '{0}-{1}-{2} GMT'.format(stamp[:7], stamp[8:11], stamp[12:25])
def http_date(epoch_seconds=None):
    """
    Format a timestamp in the RFC1123 date format required by HTTP
    (RFC2616 section 3.3.1): 'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    `epoch_seconds` is a float of seconds since the epoch in UTC (as
    returned by time.time()); None means "now".
    """
    stamp = formatdate(epoch_seconds)
    # Drop the trailing ' -0000' zone and label the time as GMT instead.
    return '{0} GMT'.format(stamp[:25])
def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.
    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.
    Returns an integer expressed in seconds since the epoch, in UTC.
    """
    # emails.Util.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        # RFC850 two-digit years: 00-69 map to 2000-2069, 70-99 to 1970-1999.
        if year < 100:
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        # Re-raise as ValueError while preserving the original traceback.
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        # Deliberate best-effort: callers treat None as "no usable date".
        pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is long than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    # (Python 2 only: sys.maxint does not exist on Python 3.)
    if six.PY2 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value
def int_to_base36(i):
    """
    Converts an integer to a base36 string
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    factor = 0
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if six.PY2:
        # Python 2 only: guard against longs, which int() arithmetic below
        # would otherwise silently accept.
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    # Find starting factor
    # (largest power of 36 not exceeding i; 36**0 for i == 0)
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    base36 = []
    # Construct base36 representation
    # by peeling off one base36 digit per power, most significant first.
    while factor >= 0:
        j = 36 ** factor
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)
def urlsafe_base64_encode(s):
    """
    Encode bytestring *s* in URL-safe base64, stripping any trailing
    padding ('=') and newlines so the result can be embedded in a URL.
    """
    encoded = base64.urlsafe_b64encode(s)
    return encoded.rstrip(b'=\n')
def urlsafe_base64_decode(s):
    """
    Decode a URL-safe base64 string produced by urlsafe_base64_encode,
    restoring any stripped '=' padding first.

    Raises ValueError on undecodable input.
    """
    data = s.encode('utf-8')  # base64 text should only contain ASCII
    padded = data.ljust(len(data) + len(data) % 4, b'=')
    try:
        return base64.urlsafe_b64decode(padded)
    except (LookupError, BinasciiError) as exc:
        raise ValueError(exc)
def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    # Undo backslash-escaping inside each quoted etag value.
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags
def quote_etag(etag):
    """
    Wrap *etag* in double quotes, backslash-escaping any backslashes or
    double quotes it contains.
    """
    escaped = etag.replace('\\', '\\\\').replace('"', '\\"')
    return '"{0}"'.format(escaped)
def same_origin(url1, url2):
    """
    Return True if url1 and url2 share scheme, hostname and port.
    """
    parts1 = urlparse(url1)
    parts2 = urlparse(url2)
    try:
        # .port raises ValueError for a malformed (non-integer) port.
        origin1 = (parts1.scheme, parts1.hostname, parts1.port)
        origin2 = (parts2.scheme, parts2.hostname, parts2.port)
    except ValueError:
        return False
    return origin1 == origin2
def is_safe_url(url, host=None):
    """
    Return ``True`` if *url* is a safe redirect target: it stays on *host*
    (or is host-relative) and uses a safe scheme.

    Always returns ``False`` for an empty url.
    """
    if not url:
        return False
    url = url.strip()
    # Chrome treats a backslash exactly like a forward slash.
    url = url.replace('\\', '/')
    # Three or more leading slashes are absolute to Chrome even though
    # urlparse does not treat them that way, so reject them outright.
    if url.startswith('///'):
        return False
    parts = urlparse(url)
    # URLs like 'http:///example.com' carry a scheme but no netloc; urlparse
    # files 'example.com' under path, yet Chrome still reads it as the host,
    # so such URLs must be rejected as well.
    if parts.scheme and not parts.netloc:
        return False
    host_ok = not parts.netloc or parts.netloc == host
    scheme_ok = not parts.scheme or parts.scheme in ('http', 'https')
    return host_ok and scheme_ok
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/mobile/shared_dressed_rebel_major_human_male_01.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build and return the Creature object for this autogenerated template.

	`kernel` is the engine kernel passed by the template loader (unused here
	but part of the factory signature).
	"""
	result = Creature()
	result.template = "object/mobile/shared_dressed_rebel_major_human_male_01.iff"
	result.attribute_template_id = 9
	result.stfName("npc_name","human_base_male")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
robertdfrench/psychic-disco | refs/heads/master | setup.py | 1 | from setuptools import setup, find_packages
import os
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as f:
        return f.read()
def requirements():
    """Read requirements.txt and return its lines without trailing newlines."""
    reqs = []
    with open('requirements.txt') as req_file:
        for raw_line in req_file:
            reqs.append(raw_line.rstrip('\n'))
    return reqs
def version():
    """Return the package version from the PD_VERSION environment variable.

    Raises KeyError when PD_VERSION is unset (builds must define it).
    """
    env = os.environ
    return env['PD_VERSION']
# Package metadata; the version is taken from the PD_VERSION environment
# variable at build time and dependencies from requirements.txt.
setup(name='psychic_disco',
      version=version(),
      description='Pythonic Microservices on AWS Lambda',
      long_description=readme(),
      url='http://github.com/robertdfrench/psychic-disco',
      author='Robert D. French',
      author_email='[email protected]',
      license='MIT',
      packages=find_packages(),
      zip_safe=False,
      include_package_data=True,
      test_suite='nose.collector',
      tests_require=['nose'],
      install_requires=requirements(),
      scripts=['bin/psychic_disco'])
|
hvqzao/ipport | refs/heads/master | deprecated/ipport-list-to-screen-script-nmap-cmds.py | 1 | #!/usr/bin/env python
name = 'regular'
# Each non-comment line of ./ports is "IP PORT"; for every pair emit a shell
# command that starts a detached screen session running an nmap service scan
# under script(1), logging to <name>-<ip>-<port>.log plus -oA output files.
# (Python 2 script: uses the print statement.)
for ip,port in map(lambda x: x.split(' '), filter(lambda x: x[:1] != '#', map(lambda x: x.strip(), open('ports').read().strip().split('\n')))):
    cmd = 'time nmap -Pn -A -T4 --open -p '+port+' -oA '+name+'-'+ip+'-'+port+' '+ip
    #cmd = 'time nmap -T2 -vv -p1-63335 127.0.0.1'
    print 'screen -d -m -S '+name+'-'+ip+'-'+port+' script -f -c \''+cmd+'\' '+name+'-'+ip+'-'+port+'.log'
|
nagyistoce/edx-platform | refs/heads/master | common/djangoapps/edxmako/paths.py | 59 | """
Set up lookup paths for mako templates.
"""
import hashlib
import os
import pkg_resources
from django.conf import settings
from mako.lookup import TemplateLookup
from . import LOOKUP
class DynamicTemplateLookup(TemplateLookup):
    """
    A specialization of the standard mako `TemplateLookup` class which allows
    for adding directories progressively.
    """
    def __init__(self, *args, **kwargs):
        super(DynamicTemplateLookup, self).__init__(*args, **kwargs)
        # Remember the base module directory so that each distinct
        # lookup-path combination gets its own compiled-template subdir.
        self.__original_module_directory = self.template_args['module_directory']
    def add_directory(self, directory, prepend=False):
        """
        Add a new directory to the template lookup path.
        """
        if prepend:
            self.directories.insert(0, os.path.normpath(directory))
        else:
            self.directories.append(os.path.normpath(directory))
        # Since the lookup path has changed, the compiled modules might be
        # wrong because now "foo.html" might be a completely different template,
        # and "foo.html.py" in the module directory has no way to know that.
        # Update the module_directory argument to point to a directory
        # specifically for this lookup path.
        unique = hashlib.md5(":".join(str(d) for d in self.directories)).hexdigest()
        self.template_args['module_directory'] = os.path.join(self.__original_module_directory, unique)
        # Also clear the internal caches. Ick.
        self._collection.clear()
        self._uri_cache.clear()
def clear_lookups(namespace):
    """Drop any mako template lookup registered under `namespace`."""
    # pop() with a default removes the entry when present and is a no-op
    # otherwise — same effect as the membership-test-then-delete idiom.
    LOOKUP.pop(namespace, None)
def add_lookup(namespace, directory, package=None, prepend=False):
    """
    Adds a new mako template lookup directory to the given namespace.
    If `package` is specified, `pkg_resources` is used to look up the directory
    inside the given package. Otherwise `directory` is assumed to be a path
    in the filesystem.
    """
    templates = LOOKUP.get(namespace)
    if not templates:
        # First directory for this namespace: create the lookup lazily with
        # the project-wide encoding and compiled-module settings.
        LOOKUP[namespace] = templates = DynamicTemplateLookup(
            module_directory=settings.MAKO_MODULE_DIR,
            output_encoding='utf-8',
            input_encoding='utf-8',
            default_filters=['decode.utf8'],
            encoding_errors='replace',
        )
    if package:
        directory = pkg_resources.resource_filename(package, directory)
    templates.add_directory(directory, prepend=prepend)
def lookup_template(namespace, name):
    """
    Look up a Mako template by namespace and name.
    Raises KeyError if the namespace has no registered lookup (see
    add_lookup) and mako's TopLevelLookupException if the template
    is missing from that lookup.
    """
    return LOOKUP[namespace].get_template(name)
|
chiragjogi/odoo | refs/heads/8.0 | openerp/addons/base/tests/test_xmlrpc.py | 200 | # -*- coding: utf-8 -*-
import openerp.tests.common
class test_xmlrpc(openerp.tests.common.HttpCase):
    """XML-RPC smoke tests run against a live server."""
    # Run only after all modules are installed, not during installation.
    at_install = False
    post_install = True
    def test_01_xmlrpc_login(self):
        """ Try to login on the common service. """
        db_name = openerp.tests.common.get_db_name()
        uid = self.xmlrpc_common.login(db_name, 'admin', 'admin')
        self.assertEqual(uid, 1)
    def test_xmlrpc_ir_model_search(self):
        """ Try a search on the object service. """
        o = self.xmlrpc_object
        db_name = openerp.tests.common.get_db_name()
        ids = o.execute(db_name, 1, 'admin', 'ir.model', 'search', [])
        self.assertIsInstance(ids, list)
        # Same search with an explicit (empty) keyword-arguments dict.
        ids = o.execute(db_name, 1, 'admin', 'ir.model', 'search', [], {})
        self.assertIsInstance(ids, list)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
uclouvain/OSIS-Louvain | refs/heads/master | base/migrations/0495_learningachievement_consistency_id.py | 1 | import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `consistency_id` to LearningAchievement and backfill it as order+1."""
    dependencies = [
        ('base', '0494_auto_20200115_0952'),
    ]
    operations = [
        migrations.AddField(
            model_name='learningachievement',
            name='consistency_id',
            field=models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)]),
        ),
        # Backfill existing rows so consistency_id starts at 1 (= order + 1);
        # the reverse is a deliberate no-op since AddField's reverse drops it.
        migrations.RunSQL(
            sql='UPDATE base_learningachievement SET consistency_id=base_learningachievement.order+1',
            reverse_sql=migrations.RunSQL.noop
        ),
    ]
|
duane-edgington/stoqs | refs/heads/master | stoqs/loaders/CANON/realtime/makeContour.py | 3 | __author__ = 'dcline'
import os
import sys
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.local'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../toNetCDF")) # lrauvNc4ToNetcdf.py is in sister toNetCDF dir
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) # settings.py is two dirs up
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "./"))
import pytz
from . import Contour
from .Contour import Contour
from datetime import datetime, timedelta
class makeContour(object):
    '''
    Create contour plots for visualizing data from LRAUV vehicles
    '''

    def process_command_line(self):
        '''The argparse library is included in Python 2.7 and is an added package for STOQS.

        Parses the command line and derives the UTC start/end query window,
        storing them on self.startDatetimeUTC / self.endDatetimeUTC.
        '''
        import argparse
        from argparse import RawTextHelpFormatter

        parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
                                         description='Script to create contour plots of CTD data collected by the LRAUV')
        parser.add_argument('-d', '--database', action='store', help='database', default='stoqs')
        parser.add_argument('--start', action='store', help='Start time in YYYYMMDDTHHMMSS format', default='20150310T210000', required=False)
        # NOTE(review): the default below has only 5 digits in the time part
        # ('21000'); strptime still parses it as 21:00:00, so it is kept
        # unchanged for backward compatibility.
        parser.add_argument('--end', action='store', help='End time in YYYYMMDDTHHMMSS format',default='20150311T21000', required=False)
        parser.add_argument('--daily', action='store', help='True to generate a daily plot',default=True, required=False)
        parser.add_argument('--animate', action='store', help='if True will create frames to make animation from',default=False, required=False)
        parser.add_argument('--zoom', action='store', help='time window in hours to zoom animation',default=8, required=False)
        parser.add_argument('--overlap', action='store', help='time window in hours to overlap animation',default=2, required=False)
        parser.add_argument('--title', action='store', help='Title for plots, will override default title created if --start specified', default='MBARI LRAUV Survey')
        parser.add_argument('-v', '--verbose', nargs='?', choices=[1,2,3], type=int, help='Turn on verbose output. Higher number = more output.', const=1)
        parser.add_argument('--minDepth', action='store', help='Minimum depth for data queries', default=0, type=float)
        parser.add_argument('--maxDepth', action='store', help='Maximum depth for data queries', default=80, type=float)
        parser.add_argument('-o', '--outDir', action='store', help='output directory to store contour image file', default='/tmp',required=False)
        parser.add_argument('--parms', action='store', help='List of space separated parameters to contour plot', nargs='*', default=
                            ['sea_water_temperature', 'sea_water_salinity', 'mass_concentration_of_chlorophyll_in_sea_water'])
        parser.add_argument('--platformName', action='store', help='Filename to store output image to', default='daphne',required=False)
        parser.add_argument('-t', '--contourUrl', action='store', help='base url to store cross referenced contour plot resources', default='http://elvis.shore.mbari.org/thredds/catalog/LRAUV/stoqs',required=False)

        self.args = parser.parse_args()
        self.commandline = ' '.join(sys.argv)

        startDatetime = datetime.strptime(self.args.start, '%Y%m%dT%H%M%S')
        endDatetime = datetime.strptime(self.args.end, '%Y%m%dT%H%M%S')
        self.endDatetimeUTC = pytz.utc.localize(endDatetime)
        endDatetimeLocal = self.endDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))
        self.startDatetimeUTC = pytz.utc.localize(startDatetime)
        startDatetimeLocal = self.startDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))

        # If daily image round the UTC time to the local time and do the query for the 24 hour period
        if self.args.daily:
            startDatetimeLocal = startDatetimeLocal.replace(hour=0,minute=0,second=0,microsecond=0)
            endDatetimeLocal = startDatetimeLocal.replace(hour=23,minute=0,second=0,microsecond=0)
            self.startDatetimeUTC = startDatetimeLocal.astimezone(pytz.utc)
            self.endDatetimeUTC = endDatetimeLocal.astimezone(pytz.utc)

    def run(self):
        '''Generate the contour plot and copy it to the public web server.'''
        # Honor the --title argument (default 'MBARI LRAUV Survey');
        # previously the title was hard-coded here and --title was ignored.
        title = self.args.title
        outFile = (self.args.outDir + '/' + self.args.platformName + '_log_' +
                   self.startDatetimeUTC.strftime('%Y%m%dT%H%M%S') + '_' +
                   self.endDatetimeUTC.strftime('%Y%m%dT%H%M%S') + '.png')
        c = Contour(self.startDatetimeUTC, self.endDatetimeUTC, self.args.database,
                    self.args.platformName, self.args.parms, title, outFile, False)
        c.run()

        # Copy the generated plot to the public web server.
        # Removed a leftover `import pdb; pdb.set_trace()` that halted every
        # non-interactive run at this point.
        cmd = r'scp %s [email protected]:/mbari/LRAUV/stoqs' % (outFile)
        #logger.debug('%s', cmd)
        os.system(cmd)
if __name__ == '__main__':
    # Command-line entry point: parse arguments, then build the contour plot.
    d = makeContour()
    d.process_command_line()
    d.run()
|
webcube/django-hyperadmin | refs/heads/master | hyperadmin/resources/storages/indexes.py | 1 | from hyperadmin.indexes import Index
from hyperadmin.resources.storages.endpoints import BoundFile
from django.core.paginator import Page
from django.core.exceptions import ObjectDoesNotExist
class StoragePaginator(object):
    """Single-page paginator over a storage index listing.

    Exposes the subset of the Django paginator interface used here:
    ``count``, ``num_pages`` and ``object_list``.
    """

    def __init__(self, index):
        self.instances = index
        self.object_list = self.instances
        self.count = len(self.instances)
        self.num_pages = 1

    @property
    def endpoint(self):
        # NOTE(review): relies on ``self.state`` being attached externally
        # before this property is read — confirm against callers.
        return self.state.endpoint

    def page(self, page_num):
        """Return the requested page; everything lives on the single page."""
        return Page(self.object_list, page_num, self)
class StorageIndex(Index):
    """Index over a file-storage backend, filtered by directory path."""

    paginator_class = StoragePaginator

    @property
    def storage(self):
        """The storage backend exposed by the owning resource."""
        return self.resource.storage

    def get_url_params(self, param_map=None):
        """
        returns url parts for use in the url regexp for conducting item lookups
        """
        # BUG FIX: the previous mutable default ({}) was mutated via
        # setdefault, leaking 'path' entries across calls (and into
        # caller-supplied dicts). Copy defensively instead.
        param_map = dict(param_map or {})
        param_map.setdefault('path', 'path')
        return [r'(?P<{path}>.+)'.format(**param_map)]

    def get_url_params_from_item(self, item, param_map=None):
        """Map *item* to the URL kwargs matching get_url_params."""
        # Same mutable-default fix as get_url_params above.
        param_map = dict(param_map or {})
        param_map.setdefault('path', 'path')
        return {param_map['path']: item.instance.name}

    def populate_state(self):
        """Read the requested path from the request state and list it."""
        self.path = self.state.params.get('path', '')
        query = self.get_index_query().filter(self.path)
        self.dirs, self.instances = query.get_dirs_and_files()

    def get_filtered_index(self):
        # Files only; directories are exposed through filter links.
        return self.instances

    def get_filter_links(self, **link_kwargs):
        """Build 'filter' links for navigating the directory tree: the
        storage root, each subdirectory, and a '../' parent link."""
        links = list()
        if self.path:
            # Link back to the storage root (drop the 'path' parameter).
            kwargs = {
                'url':'./%s' % self.state.get_query_string({}, ['path']),
                'prompt':u"/",
                'classes':['filter', 'directory'],
                'rel':"filter",
                'group':"directory",
            }
            kwargs.update(link_kwargs)
            links.append(self.get_link(**kwargs))
        for directory in self.dirs:
            kwargs = {
                'url':'./%s' % self.state.get_query_string({'path':directory}),
                'prompt':directory,
                'classes':['filter', 'directory'],
                'rel':"filter",
                'group':"directory",
            }
            kwargs.update(link_kwargs)
            links.append(self.get_link(**kwargs))
        if '/' in self.path:
            # Link one level up by trimming the last path component.
            kwargs = {
                'url':'./%s' % self.state.get_query_string({'path':self.path[:self.path.rfind('/')]}),
                'prompt':u"../",
                'classes':['filter', 'directory'],
                'rel':"filter",
                'group':"directory"
            }
            kwargs.update(link_kwargs)
            links.append(self.get_link(**kwargs))
        return links
|
google/shaka-streamer | refs/heads/master | streamer/periodconcat_node.py | 1 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Concatenates inputs into periods by creating a master DASH/HLS file."""
import os
import re
from typing import List
from xml.etree import ElementTree
from streamer import __version__
from streamer.node_base import ProcessStatus, ThreadedNodeBase
from streamer.packager_node import PackagerNode
from streamer.pipeline_configuration import PipelineConfig, ManifestFormat, StreamingMode
class PeriodConcatNode(ThreadedNodeBase):
"""A node that concatenates multiple DASH manifests and/or HLS playlists
when the input is a multiperiod_inputs_list.
"""
def __init__(self,
pipeline_config: PipelineConfig,
packager_nodes: List[PackagerNode],
output_dir: str) -> None:
"""Stores all relevant information needed for the period concatenation."""
super().__init__(thread_name='periodconcat', continue_on_exception=False, sleep_time=3)
self._pipeline_config = pipeline_config
self._output_dir = output_dir
self._packager_nodes: List[PackagerNode] = packager_nodes
def _thread_single_pass(self) -> None:
"""Watches all the PackagerNode(s), if at least one of them is running it skips this
_thread_single_pass, if all of them are finished, it starts period concatenation, if one of
them is errored, it raises a RuntimeError.
"""
for i, packager_node in enumerate(self._packager_nodes):
status = packager_node.check_status()
if status == ProcessStatus.Running:
return
elif status == ProcessStatus.Errored:
raise RuntimeError('Concatenation is stopped due to an error in PackagerNode#{}.'.format(i))
if ManifestFormat.DASH in self._pipeline_config.manifest_format:
self._dash_concat()
if ManifestFormat.HLS in self._pipeline_config.manifest_format:
self._hls_concat()
self._status = ProcessStatus.Finished
def _dash_concat(self) -> None:
"""Concatenates multiple single-period DASH manifests into one multi-period DASH manifest."""
def find(elem: ElementTree.Element, *args: str) -> ElementTree.Element:
"""A better interface for the Element.find() method.
Use it only if it is guaranteed that the element we are searching for is inside,
Otherwise it will raise an AssertionError."""
full_path = '/'.join(['shaka-live:' + tag for tag in args])
child_elem = elem.find(full_path, {'shaka-live': default_dash_namespace})
# elem.find() returns either an ElementTree.Element or None.
assert child_elem is not None, 'Unable to find: {} using the namespace: {}'.format(
full_path, default_dash_namespace)
return child_elem
# Periods that are going to be collected from different MPD files.
periods: List[ElementTree.Element] = []
# Get the root of an MPD file that we will concatenate periods into.
concat_mpd = ElementTree.ElementTree(file=os.path.join(
self._packager_nodes[0].output_dir,
self._pipeline_config.dash_output)).getroot()
# Get the default namespace.
namespace_matches = re.search('\{([^}]*)\}', concat_mpd.tag)
assert namespace_matches is not None, 'Unable to find the default namespace.'
default_dash_namespace = namespace_matches.group(1)
# Remove the 'mediaPresentationDuration' attribute.
concat_mpd.attrib.pop('mediaPresentationDuration')
# Remove the Period element in that MPD element.
concat_mpd.remove(find(concat_mpd, 'Period'))
for packager_node in self._packager_nodes:
mpd = ElementTree.ElementTree(file=os.path.join(
packager_node.output_dir,
self._pipeline_config.dash_output)).getroot()
period = find(mpd, 'Period')
period.attrib['duration'] = mpd.attrib['mediaPresentationDuration']
# A BaseURL that will have the relative path to media file.
base_url = ElementTree.Element('BaseURL')
base_url.text = os.path.relpath(packager_node.output_dir, self._output_dir) + '/'
period.insert(0, base_url)
periods.append(period)
# Add the periods collected from all the files.
concat_mpd.extend(periods)
# Write the period concat to the output_dir.
with open(os.path.join(
self._output_dir,
self._pipeline_config.dash_output), 'w') as master_dash:
contents = "<?xml version='1.0' encoding='UTF-8'?>\n"
# TODO: Add Shaka-Packager version to this xml comment.
contents += "<!--Generated with https://github.com/google/shaka-packager -->\n"
contents += "<!--Made Multi-Period with https://github.com/google/shaka-streamer version {} -->\n".format(__version__)
# xml.ElementTree replaces the default namespace with 'ns0'.
# Register the DASH namespace back as the defualt namespace before converting to string.
ElementTree.register_namespace('', default_dash_namespace)
# xml.etree.ElementTree already have an ElementTree().write() method,
# but it won't allow putting comments at the begining of the file.
contents += ElementTree.tostring(element=concat_mpd, encoding='unicode')
master_dash.write(contents)
def _hls_concat(self) -> None:
"""Concatenates multiple HLS playlists with #EXT-X-DISCONTINUITY."""
import m3u8 # type: ignore
|
detrout/debian-statsmodels | refs/heads/debian | statsmodels/tsa/arima_model.py | 7 | # Note: The information criteria add 1 to the number of parameters
# whenever the model has an AR or MA term since, in principle,
# the variance could be treated as a free parameter and restricted
# This code does not allow this, but it adds consistency with other
# packages such as gretl and X12-ARIMA
from __future__ import absolute_import
from statsmodels.compat.python import string_types, range
# for 2to3 with extensions
from datetime import datetime
import numpy as np
from scipy import optimize
from scipy.stats import t, norm
from scipy.signal import lfilter
from numpy import dot, log, zeros, pi
from numpy.linalg import inv
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
from statsmodels.regression.linear_model import yule_walker, GLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams,
_ma_transparams, _ma_invtransparams,
unintegrate, unintegrate_levels)
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tools.numdiff import approx_hess_cs, approx_fprime_cs
from statsmodels.tsa.base.datetools import _index_date
from statsmodels.tsa.kalmanf import KalmanFilter
# ---------------------------------------------------------------------------
# Shared docstring templates for the ARMA/ARIMA model and results classes.
# The %(...)s placeholders (Model, params, extra_params, ...) are filled in
# by dict interpolation when the concrete docstrings are assembled below.
# ---------------------------------------------------------------------------
_armax_notes = """
Notes
-----
If exogenous variables are given, then the model that is fit is
.. math::
\\phi(L)(y_t - X_t\\beta) = \\theta(L)\epsilon_t
where :math:`\\phi` and :math:`\\theta` are polynomials in the lag
operator, :math:`L`. This is the regression model with ARMA errors,
or ARMAX model. This specification is used, whether or not the model
is fit using conditional sum of square or maximum-likelihood, using
the `method` argument in
:meth:`statsmodels.tsa.arima_model.%(Model)s.fit`. Therefore, for
now, `css` and `mle` refer to estimation methods only. This may
change for the case of the `css` model in future versions.
"""
_arma_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
An optional arry of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_arma_model = "Autoregressive Moving Average ARMA(p,q) Model"
_arima_model = "Autoregressive Integrated Moving Average ARIMA(p,d,q) Model"
_arima_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
An optional arry of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_predict_notes = """
Notes
-----
Use the results predict method instead.
"""
_results_notes = """
Notes
-----
It is recommended to use dates with the time-series models, as the
below will probably make clear. However, if ARIMA is used without
dates and/or `start` and `end` are given as indices, then these
indices are in terms of the *original*, undifferenced series. Ie.,
given some undifferenced observations::
1970Q1, 1
1970Q2, 1.5
1970Q3, 1.25
1970Q4, 2.25
1971Q1, 1.2
1971Q2, 4.1
1970Q1 is observation 0 in the original series. However, if we fit an
ARIMA(p,1,q) model then we lose this first observation through
differencing. Therefore, the first observation we can forecast (if
using exact MLE) is index 1. In the differenced series this is index
0, but we refer to it as 1 from the original series.
"""
# Template for the predict() docstrings; filled in separately for ARMA /
# ARIMA and for model vs. results variants below.
_predict = """
%(Model)s model in-sample and out-of-sample prediction
Parameters
----------
%(params)s
start : int, str, or datetime
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
Zero-indexed observation number at which to end forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction.
exog : array-like, optional
If the model is an ARMAX and out-of-sample forecasting is
requested, exog must be given. Note that you'll need to pass
`k_ar` additional lags for any exogenous variables. E.g., if you
fit an ARMAX(2, q) model and want to predict 5 steps, you need 7
observations to do this.
dynamic : bool, optional
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
%(extra_params)s
Returns
-------
%(returns)s
%(extra_section)s
"""
_predict_returns = """predict : array
The predicted values.
"""
# Concrete docstrings assembled from the template above.
_arma_predict = _predict % {"Model" : "ARMA",
"params" : """
params : array-like
The fitted parameters of the model.""",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _predict_notes}
_arma_results_predict = _predict % {"Model" : "ARMA", "params" : "",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_predict = _predict % {"Model" : "ARIMA",
"params" : """params : array-like
The fitted parameters of the model.""",
"extra_params" : """typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""", "returns" : _predict_returns,
"extra_section" : _predict_notes}
_arima_results_predict = _predict % {"Model" : "ARIMA",
"params" : "",
"extra_params" :
"""typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_plot_predict_example = """ Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import pandas as pd
>>>
>>> dta = sm.datasets.sunspots.load_pandas().data[['SUNACTIVITY']]
>>> dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')
>>> res = sm.tsa.ARMA(dta, (3, 0)).fit()
>>> fig, ax = plt.subplots()
>>> ax = dta.ix['1950':].plot(ax=ax)
>>> fig = res.plot_predict('1990', '2012', dynamic=True, ax=ax,
... plot_insample=False)
>>> plt.show()
.. plot:: plots/arma_predict_plot.py
"""
# plot_predict docstrings reuse the predict template minus its first lines.
_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' + _results_notes)
}
_arima_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' +
'\n'.join(_results_notes.split('\n')[:3]) +
("""
This is hard-coded to only allow plotting of the forecasts in levels.
""") +
'\n'.join(_results_notes.split('\n')[3:]))
}
def cumsum_n(x, n):
    """Apply ``np.cumsum`` to *x* ``n`` times (the n-fold cumulative sum).

    With ``n == 0`` the input is returned unchanged.
    """
    while n:
        x = np.cumsum(x)
        n -= 1
    return x
def _check_arima_start(start, k_ar, k_diff, method, dynamic):
if start < 0:
raise ValueError("The start index %d of the original series "
"has been differenced away" % start)
elif (dynamic or 'mle' not in method) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _get_predict_out_of_sample(endog, p, q, k_trend, k_exog, start, errors,
                               trendparam, exparams, arparams, maparams, steps,
                               method, exog=None):
    """
    Returns endog, resid, mu of appropriate length for out of sample
    prediction.

    The returned ``endog`` is a fresh buffer of length ``p + steps - 1``
    seeded with the last ``p`` in-sample observations, ``resid`` holds the
    last ``q`` in-sample errors (or None when q == 0), and ``mu`` is the
    deterministic (trend/exog) part of the forecast for each step.
    """
    if q:
        resid = np.zeros(q)
        # Pick the q residuals immediately preceding `start`; the offsets
        # differ because CSS errors start at index p of the original series.
        if start and 'mle' in method or (start == p and not start == 0):
            resid[:q] = errors[start-q:start]
        elif start:
            resid[:q] = errors[start-q-p:start-p]
        else:
            # start == 0: fall back to the last q errors.
            resid[:q] = errors[-q:]
    else:
        resid = None
    # Keep a reference to the observed series before `endog` is rebound to
    # the forecast buffer below.
    y = endog
    if k_trend == 1:
        # use expectation not constant
        if k_exog > 0:
            #TODO: technically should only hold for MLE not
            # conditional model. See #274.
            # ensure 2-d for conformability
            if np.ndim(exog) == 1 and k_exog == 1:
                # have a 1d series of observations -> 2d
                exog = exog[:, None]
            elif np.ndim(exog) == 1:
                # should have a 1d row of exog -> 2d
                if len(exog) != k_exog:
                    raise ValueError("1d exog given and len(exog) != k_exog")
                exog = exog[None, :]
            X = lagmat(np.dot(exog, exparams), p, original='in', trim='both')
            mu = trendparam * (1 - arparams.sum())
            # arparams were reversed in unpack for ease later
            mu = mu + (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
        else:
            # Constant mean implied by the trend: c * (1 - sum(ar)).
            mu = trendparam * (1 - arparams.sum())
            mu = np.array([mu]*steps)
    elif k_exog > 0:
        X = np.dot(exog, exparams)
        #NOTE: you shouldn't have to give in-sample exog!
        X = lagmat(X, p, original='in', trim='both')
        mu = (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
    else:
        # No trend, no exog: zero deterministic part.
        mu = np.zeros(steps)
    # Forecast working buffer: p seed values + steps - 1 forecasts.
    endog = np.zeros(p + steps - 1)
    if p and start:
        endog[:p] = y[start-p:start]
    elif p:
        endog[:p] = y[-p:]
    return endog, resid, mu
def _arma_predict_out_of_sample(params, steps, errors, p, q, k_trend, k_exog,
                                endog, exog=None, start=0, method='mle'):
    """Recursively build `steps` out-of-sample ARMA(p, q) forecasts.

    MA terms contribute only while past in-sample errors remain (the first
    ``min(q, steps - 1)`` steps); after that the recursion is purely
    autoregressive around the deterministic part ``mu``.
    """
    (trendparam, exparams,
     arparams, maparams) = _unpack_params(params, (p, q), k_trend,
                                          k_exog, reverse=True)
    endog, resid, mu = _get_predict_out_of_sample(endog, p, q, k_trend, k_exog,
                                                  start, errors, trendparam,
                                                  exparams, arparams,
                                                  maparams, steps, method,
                                                  exog)
    forecast = np.zeros(steps)
    if steps == 1:
        # Single-step shortcut: no recursion needed.
        if q:
            return mu[0] + np.dot(arparams, endog[:p]) + np.dot(maparams,
                                                                resid[:q])
        else:
            return mu[0] + np.dot(arparams, endog[:p])
    # Initialize the loop index so the AR-only loop below starts correctly
    # even when the first (MA) loop does not run.
    if q:
        i = 0  # if q == 1
    else:
        i = -1
    # Steps that still see in-sample errors: AR + (shrinking) MA part.
    for i in range(min(q, steps - 1)):
        fcast = (mu[i] + np.dot(arparams, endog[i:i + p]) +
                 np.dot(maparams[:q - i], resid[i:i + q]))
        forecast[i] = fcast
        # Feed the forecast back as a lagged value for later steps.
        endog[i+p] = fcast
    # Remaining steps: purely autoregressive recursion.
    for i in range(i + 1, steps - 1):
        fcast = mu[i] + np.dot(arparams, endog[i:i+p])
        forecast[i] = fcast
        endog[i+p] = fcast
    #need to do one more without updating endog
    forecast[-1] = mu[-1] + np.dot(arparams, endog[steps - 1:])
    return forecast
def _arma_predict_in_sample(start, end, endog, resid, k_ar, method):
"""
Pre- and in-sample fitting for ARMA.
"""
if 'mle' in method:
fittedvalues = endog - resid # get them all then trim
else:
fittedvalues = endog[k_ar:] - resid
fv_start = start
if 'mle' not in method:
fv_start -= k_ar # start is in terms of endog index
fv_end = min(len(fittedvalues), end + 1)
return fittedvalues[fv_start:fv_end]
def _validate(start, k_ar, k_diff, dates, method):
    """Resolve *start* (index, date string, or datetime) to an index into the
    differenced series and check it is usable for the given method."""
    date_like = isinstance(start, (string_types, datetime))
    if date_like:
        start = _index_date(start, dates)
    start = start - k_diff
    conditional = 'mle' not in method
    if conditional and start < k_ar - k_diff:
        raise ValueError("Start must be >= k_ar for conditional "
                         "MLE or dynamic forecast. Got %s" % start)
    return start
def _unpack_params(params, order, k_trend, k_exog, reverse=False):
p, q = order
k = k_trend + k_exog
maparams = params[k+p:]
arparams = params[k:k+p]
trend = params[:k_trend]
exparams = params[k_trend:k]
if reverse:
return trend, exparams, arparams[::-1], maparams[::-1]
return trend, exparams, arparams, maparams
def _unpack_order(order):
k_ar, k_ma, k = order
k_lags = max(k_ar, k_ma+1)
return k_ar, k_ma, order, k_lags
def _make_arma_names(data, k_trend, order, exog_names):
    """Build the full parameter-name list: trend, exog names, then the
    'ar.L*' and 'ma.L*' lag names derived from the endog name."""
    k_ar, k_ma = order
    exog_names = exog_names or []
    ar_names = ['ar.' + lag
                for lag in util.make_lag_names([data.ynames], k_ar, 0)]
    ma_names = ['ma.' + lag
                for lag in util.make_lag_names([data.ynames], k_ma, 0)]
    trend_name = util.make_lag_names('', 0, k_trend)
    return trend_name + exog_names + ar_names + ma_names
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == 'c': # constant only
exog = np.ones((len(endog), 1))
elif exog is not None and trend == 'c': # constant plus exogenous
exog = add_trend(exog, trend='c', prepend=True)
elif exog is not None and trend == 'nc':
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == 'nc':
k_trend = 0
return k_trend, exog
def _check_estimable(nobs, n_params):
if nobs <= n_params:
raise ValueError("Insufficient degrees of freedom to estimate")
class ARMA(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : _arma_model,
"params" : _arma_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARMA"}}
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
super(ARMA, self).__init__(endog, exog, dates, freq, missing=missing)
exog = self.data.exog # get it after it's gone through processing
_check_estimable(len(self.endog), sum(order))
self.k_ar = k_ar = order[0]
self.k_ma = k_ma = order[1]
self.k_lags = max(k_ar, k_ma+1)
if exog is not None:
if exog.ndim == 1:
exog = exog[:, None]
k_exog = exog.shape[1] # number of exog. variables excl. const
else:
k_exog = 0
self.k_exog = k_exog
    def _fit_start_params_hr(self, order):
        """
        Get starting parameters for fit.

        Parameters
        ----------
        order : iterable
            (p,q,k) - AR lags, MA lags, and number of exogenous variables
            including the constant.

        Returns
        -------
        start_params : array
            A first guess at the starting parameters.

        Notes
        -----
        If necessary, fits an AR process with the laglength selected according
        to best BIC. Obtain the residuals. Then fit an ARMA(p,q) model via
        OLS using these residuals for a first approximation. Uses a separate
        OLS regression to find the coefficients of exogenous variables.

        References
        ----------
        Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
        autoregressive-moving average order." `Biometrika`. 69.1.
        """
        p, q, k = order
        # Parameter layout: [exog/trend (k) | AR (p) | MA (q)].
        start_params = zeros((p+q+k))
        endog = self.endog.copy()  # copy because overwritten
        exog = self.exog
        if k != 0:
            # Partial out the exogenous part first; the ARMA start values
            # are then estimated on the residual series.
            ols_params = GLS(endog, exog).fit().params
            start_params[:k] = ols_params
            endog -= np.dot(exog, ols_params).squeeze()
        if q != 0:
            if p != 0:
                # make sure we don't run into small data problems in AR fit
                nobs = len(endog)
                # Schwert-style rule of thumb for the maximum AR lag length.
                maxlag = int(round(12*(nobs/100.)**(1/4.)))
                if maxlag >= nobs:
                    maxlag = nobs - 1
                armod = AR(endog).fit(ic='bic', trend='nc', maxlag=maxlag)
                arcoefs_tmp = armod.params
                p_tmp = armod.k_ar
                # it's possible in small samples that optimal lag-order
                # doesn't leave enough obs. No consistent way to fix.
                if p_tmp + q >= len(endog):
                    raise ValueError("Proper starting parameters cannot"
                                     " be found for this order with this "
                                     "number of observations. Use the "
                                     "start_params argument.")
                # Residuals of the long-AR fit act as estimated innovations.
                resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
                                                      trim='both'),
                                               arcoefs_tmp)
                # Align lagged endog and lagged residuals to a common sample.
                if p < p_tmp + q:
                    endog_start = p_tmp + q - p
                    resid_start = 0
                else:
                    endog_start = 0
                    resid_start = p - p_tmp - q
                lag_endog = lagmat(endog, p, 'both')[endog_start:]
                lag_resid = lagmat(resid, q, 'both')[resid_start:]
                # stack ar lags and resids
                X = np.column_stack((lag_endog, lag_resid))
                coefs = GLS(endog[max(p_tmp + q, p):], X).fit().params
                start_params[k:k+p+q] = coefs
            else:
                # Pure MA: use Yule-Walker on the series as a rough guess.
                start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]
        if q == 0 and p != 0:
            # Pure AR: Yule-Walker estimates directly.
            arcoefs = yule_walker(endog, order=p)[0]
            start_params[k:k+p] = arcoefs
        # check AR coefficients
        if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k:k + p]]
                                            )) < 1):
            raise ValueError("The computed initial AR coefficients are not "
                             "stationary\nYou should induce stationarity, "
                             "choose a different model order, or you can\n"
                             "pass your own start_params.")
        # check MA coefficients
        elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p:]]
                                              )) < 1):
            raise ValueError("The computed initial MA coefficients are not "
                             "invertible\nYou should induce invertibility, "
                             "choose a different model order, or you can\n"
                             "pass your own start_params.")
        # check MA coefficients
        return start_params
    def _fit_start_params(self, order, method):
        """Return starting parameters for `fit`: Hannan-Rissanen values,
        refined by a CSS optimization when method == 'css-mle'."""
        if method != 'css-mle':  # use Hannan-Rissanen to get start params
            start_params = self._fit_start_params_hr(order)
        else:  # use CSS to get start params
            # Minimize the negative conditional-sum-of-squares likelihood.
            func = lambda params: -self.loglike_css(params)
            #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
            start_params = self._fit_start_params_hr(order)
            if self.transparams:
                # Optimize in the unconstrained (Jones-transformed) space.
                start_params = self._invtransparams(start_params)
            bounds = [(None,)*2]*sum(order)
            mlefit = optimize.fmin_l_bfgs_b(func, start_params,
                                            approx_grad=True, m=12,
                                            pgtol=1e-7, factr=1e3,
                                            bounds=bounds, iprint=-1)
            # Map the optimum back to the constrained parameter space.
            start_params = self._transparams(mlefit[0])
        return start_params
def score(self, params):
"""
Compute the score function at params.
Notes
-----
This is a numerical approximation.
"""
return approx_fprime_cs(params, self.loglike, args=(False,))
def hessian(self, params):
"""
Compute the Hessian at params,
Notes
-----
This is a numerical approximation.
"""
return approx_hess_cs(params, self.loglike, args=(False,))
def _transparams(self, params):
"""
Transforms params to induce stationarity/invertability.
Reference
---------
Jones(1980)
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = np.zeros_like(params)
# just copy exogenous parameters
if k != 0:
newparams[:k] = params[:k]
# AR Coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_transparams(params[k:k+k_ar].copy())
# MA Coeffs
if k_ma != 0:
newparams[k+k_ar:] = _ma_transparams(params[k+k_ar:].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = start_params.copy()
arcoefs = newparams[k:k+k_ar]
macoefs = newparams[k+k_ar:]
# AR coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_invtransparams(arcoefs)
# MA coeffs
if k_ma != 0:
newparams[k+k_ar:k+k_ar+k_ma] = _ma_invtransparams(macoefs)
return newparams
    def _get_predict_start(self, start, dynamic):
        """Resolve *start* (None, int, or date-like) to a valid prediction
        start index and record the corresponding start date."""
        # do some defaults
        method = getattr(self, 'method', 'mle')
        k_ar = getattr(self, 'k_ar', 0)
        k_diff = getattr(self, 'k_diff', 0)
        if start is None:
            # Exact MLE can predict from 0; conditional/dynamic prediction
            # needs k_ar observations of history first.
            if 'mle' in method and not dynamic:
                start = 0
            else:
                start = k_ar
            self._set_predict_start_date(start)  # else it's done in super
        elif isinstance(start, int):
            start = super(ARMA, self)._get_predict_start(start)
        else:  # should be on a date
            #elif 'mle' not in method or dynamic: # should be on a date
            # Convert the date to a (differenced-series) index first.
            start = _validate(start, k_ar, k_diff, self.data.dates,
                              method)
            start = super(ARMA, self)._get_predict_start(start)
        _check_arima_start(start, k_ar, k_diff, method, dynamic)
        return start
def _get_predict_end(self, end, dynamic=False):
# pass through so predict works for ARIMA and ARMA
return super(ARMA, self)._get_predict_end(end)
    def geterrors(self, params):
        """
        Get the errors of the ARMA process.

        Parameters
        ----------
        params : array-like
            The fitted ARMA parameters
        order : array-like
            3 item iterable, with the number of AR, MA, and exogenous
            parameters, including the trend

        Returns
        -------
        errors : ndarray
            One-step-ahead prediction errors (innovations) of the process.
        """
        #start = self._get_predict_start(start) # will be an index of a date
        #end, out_of_sample = self._get_predict_end(end)
        params = np.asarray(params)
        k_ar, k_ma = self.k_ar, self.k_ma
        k = self.k_exog + self.k_trend
        method = getattr(self, 'method', 'mle')
        if 'mle' in method:  # use KalmanFilter to get errors
            (y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat,
             T_mat, paramsdtype) = KalmanFilter._init_kalman_state(params,
                                                                   self)
            errors = KalmanFilter.geterrors(y, k, k_ar, k_ma, k_lags, nobs,
                                            Z_mat, m, R_mat, T_mat,
                                            paramsdtype)
            if isinstance(errors, tuple):
                errors = errors[0]  # non-cython version returns a tuple
        else:  # use scipy.signal.lfilter
            y = self.endog.copy()
            k = self.k_exog + self.k_trend
            if k > 0:
                # Remove the deterministic (trend/exog) part first.
                y -= dot(self.exog, params[:k])
            k_ar = self.k_ar
            k_ma = self.k_ma
            (trendparams, exparams,
             arparams, maparams) = _unpack_params(params, (k_ar, k_ma),
                                                  self.k_trend, self.k_exog,
                                                  reverse=False)
            # Filter with AR polynomial as numerator, MA as denominator to
            # recover the innovations from the observed series.
            b, a = np.r_[1, -arparams], np.r_[1, maparams]
            zi = zeros((max(k_ar, k_ma)))
            # Initial filter state so the first k_ar errors are zero
            # (conditional sum-of-squares convention).
            for i in range(k_ar):
                zi[i] = sum(-b[:i+1][::-1]*y[:i+1])
            e = lfilter(b, a, y, zi=zi)
            errors = e[0][k_ar:]
        return errors.squeeze()
    def predict(self, params, start=None, end=None, exog=None, dynamic=False):
        # In-sample prediction and out-of-sample forecasting for ARMA(X).
        # Docstring is attached below via `predict.__doc__ = _arma_predict`.
        method = getattr(self, 'method', 'mle')  # don't assume fit
        #params = np.asarray(params)
        # will return an index of a date
        start = self._get_predict_start(start, dynamic)
        end, out_of_sample = self._get_predict_end(end, dynamic)
        if out_of_sample and (exog is None and self.k_exog > 0):
            raise ValueError("You must provide exog for ARMAX")
        endog = self.endog
        resid = self.geterrors(params)
        k_ar = self.k_ar
        if out_of_sample != 0 and self.k_exog > 0:
            # promote 1d exog to a column so vstack below works
            if self.k_exog == 1 and exog.ndim == 1:
                exog = exog[:, None]
            # we need the last k_ar exog for the lag-polynomial
            if self.k_exog > 0 and k_ar > 0:
                # need the last k_ar exog for the lag-polynomial
                exog = np.vstack((self.exog[-k_ar:, self.k_trend:], exog))
        if dynamic:
            #TODO: now that predict does dynamic in-sample it should
            # also return error estimates and confidence intervals
            # but how? len(endog) is not tot_obs
            # dynamic prediction forecasts the whole [start, end] span as
            # if it were out of sample
            out_of_sample += end - start + 1
            return _arma_predict_out_of_sample(params, out_of_sample, resid,
                                               k_ar, self.k_ma, self.k_trend,
                                               self.k_exog, endog, exog,
                                               start, method)
        predictedvalues = _arma_predict_in_sample(start, end, endog, resid,
                                                  k_ar, method)
        if out_of_sample:
            # append the forecast beyond the sample to the in-sample fit
            forecastvalues = _arma_predict_out_of_sample(params, out_of_sample,
                                                         resid, k_ar,
                                                         self.k_ma,
                                                         self.k_trend,
                                                         self.k_exog, endog,
                                                         exog, method=method)
            predictedvalues = np.r_[predictedvalues, forecastvalues]
        return predictedvalues
    predict.__doc__ = _arma_predict
def loglike(self, params, set_sigma2=True):
"""
Compute the log-likelihood for ARMA(p,q) model
Notes
-----
Likelihood used depends on the method set in fit
"""
method = self.method
if method in ['mle', 'css-mle']:
return self.loglike_kalman(params, set_sigma2)
elif method == 'css':
return self.loglike_css(params, set_sigma2)
else:
raise ValueError("Method %s not understood" % method)
def loglike_kalman(self, params, set_sigma2=True):
"""
Compute exact loglikelihood for ARMA(p,q) model by the Kalman Filter.
"""
return KalmanFilter.loglike(params, self, set_sigma2)
    def loglike_css(self, params, set_sigma2=True):
        """
        Conditional Sum of Squares likelihood function.

        Removes the deterministic part, filters the series to obtain the
        residuals conditional on zero pre-sample errors, and evaluates the
        Gaussian likelihood at sigma2 = ssr/nobs.  If `set_sigma2`, the
        implied variance is stored on the model.
        """
        k_ar = self.k_ar
        k_ma = self.k_ma
        k = self.k_exog + self.k_trend
        # cast so complex-step differentiation of params propagates
        y = self.endog.copy().astype(params.dtype)
        nobs = self.nobs
        # how to handle if empty?
        if self.transparams:
            newparams = self._transparams(params)
        else:
            newparams = params
        if k > 0:
            y -= dot(self.exog, newparams[:k])
        # the order of p determines how many zeros errors to set for lfilter
        b, a = np.r_[1, -newparams[k:k + k_ar]], np.r_[1, newparams[k + k_ar:]]
        zi = np.zeros((max(k_ar, k_ma)), dtype=params.dtype)
        # seed the filter state so the first k_ar residuals are zero
        for i in range(k_ar):
            zi[i] = sum(-b[:i + 1][::-1] * y[:i + 1])
        errors = lfilter(b, a, y, zi=zi)[0][k_ar:]
        ssr = np.dot(errors, errors)
        sigma2 = ssr/nobs
        if set_sigma2:
            self.sigma2 = sigma2
        llf = -nobs/2.*(log(2*pi) + log(sigma2)) - ssr/(2*sigma2)
        return llf
    def fit(self, start_params=None, trend='c', method="css-mle",
            transparams=True, solver='lbfgs', maxiter=50, full_output=1,
            disp=5, callback=None, **kwargs):
        """
        Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.

        Parameters
        ----------
        start_params : array-like, optional
            Starting parameters for ARMA(p,q). If None, the default is given
            by ARMA._fit_start_params. See there for more information.
        transparams : bool, optional
            Whehter or not to transform the parameters to ensure stationarity.
            Uses the transformation suggested in Jones (1980). If False,
            no checking for stationarity or invertibility is done.
        method : str {'css-mle','mle','css'}
            This is the loglikelihood to maximize. If "css-mle", the
            conditional sum of squares likelihood is maximized and its values
            are used as starting values for the computation of the exact
            likelihood via the Kalman filter. If "mle", the exact likelihood
            is maximized via the Kalman Filter. If "css" the conditional sum
            of squares likelihood is maximized. All three methods use
            `start_params` as starting parameters. See above for more
            information.
        trend : str {'c','nc'}
            Whether to include a constant or not. 'c' includes constant,
            'nc' no constant.
        solver : str or None, optional
            Solver to be used. The default is 'lbfgs' (limited memory
            Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
            'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
            (conjugate gradient), 'ncg' (non-conjugate gradient), and
            'powell'. By default, the limited memory BFGS uses m=12 to
            approximate the Hessian, projected gradient tolerance of 1e-8 and
            factr = 1e2. You can change these by using kwargs.
        maxiter : int, optional
            The maximum number of function evaluations. Default is 50.
        tol : float
            The convergence tolerance. Default is 1e-08.
        full_output : bool, optional
            If True, all output from solver will be available in
            the Results object's mle_retvals attribute. Output is dependent
            on the solver. See Notes for more information.
        disp : bool, optional
            If True, convergence information is printed. For the default
            l_bfgs_b solver, disp controls the frequency of the output during
            the iterations. disp < 0 means no output in this case.
        callback : function, optional
            Called after each iteration as callback(xk) where xk is the current
            parameter vector.
        kwargs
            See Notes for keyword arguments that can be passed to fit.

        Returns
        -------
        statsmodels.tsa.arima_model.ARMAResults class

        See also
        --------
        statsmodels.base.model.LikelihoodModel.fit : for more information
            on using the solvers.
        ARMAResults : results class returned by fit

        Notes
        ------
        If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unkown state is zero, and that the inital variance is
        P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
        r, order = 'F')
        """
        k_ar = self.k_ar
        k_ma = self.k_ma
        # enforce invertibility
        self.transparams = transparams
        endog, exog = self.endog, self.exog
        k_exog = self.k_exog
        self.nobs = len(endog) # this is overwritten if method is 'css'
        # (re)set trend and handle exogenous variables
        # always pass original exog
        k_trend, exog = _make_arma_exog(endog, self.exog, trend)
        # Check has something to estimate
        if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:
            raise ValueError("Estimation requires the inclusion of least one "
                             "AR term, MA term, a constant or an exogenous "
                             "variable.")
        # check again now that we know the trend
        _check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)
        self.k_trend = k_trend
        self.exog = exog    # overwrites original exog from __init__
        # (re)set names for this model
        self.exog_names = _make_arma_names(self.data, k_trend, (k_ar, k_ma),
                                           self.exog_names)
        k = k_trend + k_exog
        # choose objective function
        if k_ma == 0 and k_ar == 0:
            method = "css"  # Always CSS when no AR or MA terms
        self.method = method = method.lower()
        # adjust nobs for css
        if method == 'css':
            # the first k_ar observations are conditioned on, not counted
            self.nobs = len(self.endog) - k_ar
        if start_params is not None:
            start_params = np.asarray(start_params)
        else:  # estimate starting parameters
            start_params = self._fit_start_params((k_ar, k_ma, k), method)
        if transparams:  # transform initial parameters to ensure invertibility
            start_params = self._invtransparams(start_params)
        if solver == 'lbfgs':
            # sensible lbfgs defaults, overridable through kwargs
            kwargs.setdefault('pgtol', 1e-8)
            kwargs.setdefault('factr', 1e2)
            kwargs.setdefault('m', 12)
            kwargs.setdefault('approx_grad', True)
        mlefit = super(ARMA, self).fit(start_params, method=solver,
                                       maxiter=maxiter,
                                       full_output=full_output, disp=disp,
                                       callback=callback, **kwargs)
        params = mlefit.params
        if transparams:  # transform parameters back
            params = self._transparams(params)
        self.transparams = False  # so methods don't expect transf.
        normalized_cov_params = None  # TODO: fix this
        armafit = ARMAResults(self, params, normalized_cov_params)
        armafit.mle_retvals = mlefit.mle_retvals
        armafit.mle_settings = mlefit.mle_settings
        armafit.mlefit = mlefit
        return ARMAResultsWrapper(armafit)
#NOTE: the length of endog changes when we give a difference to fit
#so model methods are not the same on unfit models as fit ones
#starting to think that order of model should be put in instantiation...
class ARIMA(ARMA):
    __doc__ = tsbase._tsa_doc % {"model" : _arima_model,
                                 "params" : _arima_params, "extra_params" : "",
                                 "extra_sections" : _armax_notes %
                                 {"Model" : "ARIMA"}}
    def __new__(cls, endog, order, exog=None, dates=None, freq=None,
                missing='none'):
        # An ARIMA(p, 0, q) is just an ARMA(p, q); short-circuit here so
        # callers transparently get the simpler model back.
        p, d, q = order
        if d == 0:  # then we just use an ARMA model
            return ARMA(endog, (p, q), exog, dates, freq, missing)
        else:
            mod = super(ARIMA, cls).__new__(cls)
            mod.__init__(endog, order, exog, dates, freq, missing)
            return mod
    def __init__(self, endog, order, exog=None, dates=None, freq=None,
                 missing='none'):
        p, d, q = order
        if d > 2:
            #NOTE: to make more general, need to address the d == 2 stuff
            # in the predict method
            raise ValueError("d > 2 is not supported")
        super(ARIMA, self).__init__(endog, (p, q), exog, dates, freq, missing)
        self.k_diff = d
        # keep the first d levels so forecasts can be un-differenced later
        self._first_unintegrate = unintegrate_levels(self.endog[:d], d)
        self.endog = np.diff(self.endog, n=d)
        #NOTE: will check in ARMA but check again since differenced now
        _check_estimable(len(self.endog), p+q)
        if exog is not None:
            self.exog = self.exog[d:]
        # rename the dependent variable to reflect differencing, e.g. D.y
        if d == 1:
            self.data.ynames = 'D.' + self.endog_names
        else:
            self.data.ynames = 'D{0:d}.'.format(d) + self.endog_names
        # what about exog, should we difference it automatically before
        # super call?
    def _get_predict_start(self, start, dynamic):
        """
        Resolve `start` to an index into the differenced series, shifting
        integer starts by k_diff and validating date starts.
        """
        #TODO: remove all these getattr and move order specification to
        # class constructor
        k_diff = getattr(self, 'k_diff', 0)
        method = getattr(self, 'method', 'mle')
        k_ar = getattr(self, 'k_ar', 0)
        if start is None:
            if 'mle' in method and not dynamic:
                start = 0
            else:
                start = k_ar
        elif isinstance(start, int):
            # user-facing indices are in levels; shift into differenced space
            start -= k_diff
            try:  # catch when given an integer outside of dates index
                start = super(ARIMA, self)._get_predict_start(start,
                                                              dynamic)
            except IndexError:
                raise ValueError("start must be in series. "
                                 "got %d" % (start + k_diff))
        else:  # received a date
            start = _validate(start, k_ar, k_diff, self.data.dates,
                              method)
            start = super(ARIMA, self)._get_predict_start(start, dynamic)
        # reset date for k_diff adjustment
        self._set_predict_start_date(start + k_diff)
        return start
    def _get_predict_end(self, end, dynamic=False):
        """
        Returns last index to be forecast of the differenced array.
        Handling of inclusiveness should be done in the predict function.
        """
        end, out_of_sample = super(ARIMA, self)._get_predict_end(end, dynamic)
        if 'mle' not in self.method and not dynamic:
            end -= self.k_ar
        return end - self.k_diff, out_of_sample
    def fit(self, start_params=None, trend='c', method="css-mle",
            transparams=True, solver='lbfgs', maxiter=50, full_output=1,
            disp=5, callback=None, **kwargs):
        """
        Fits ARIMA(p,d,q) model by exact maximum likelihood via Kalman filter.

        Parameters
        ----------
        start_params : array-like, optional
            Starting parameters for ARMA(p,q). If None, the default is given
            by ARMA._fit_start_params. See there for more information.
        transparams : bool, optional
            Whehter or not to transform the parameters to ensure stationarity.
            Uses the transformation suggested in Jones (1980). If False,
            no checking for stationarity or invertibility is done.
        method : str {'css-mle','mle','css'}
            This is the loglikelihood to maximize. If "css-mle", the
            conditional sum of squares likelihood is maximized and its values
            are used as starting values for the computation of the exact
            likelihood via the Kalman filter. If "mle", the exact likelihood
            is maximized via the Kalman Filter. If "css" the conditional sum
            of squares likelihood is maximized. All three methods use
            `start_params` as starting parameters. See above for more
            information.
        trend : str {'c','nc'}
            Whether to include a constant or not. 'c' includes constant,
            'nc' no constant.
        solver : str or None, optional
            Solver to be used. The default is 'lbfgs' (limited memory
            Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
            'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
            (conjugate gradient), 'ncg' (non-conjugate gradient), and
            'powell'. By default, the limited memory BFGS uses m=12 to
            approximate the Hessian, projected gradient tolerance of 1e-8 and
            factr = 1e2. You can change these by using kwargs.
        maxiter : int, optional
            The maximum number of function evaluations. Default is 50.
        tol : float
            The convergence tolerance. Default is 1e-08.
        full_output : bool, optional
            If True, all output from solver will be available in
            the Results object's mle_retvals attribute. Output is dependent
            on the solver. See Notes for more information.
        disp : bool, optional
            If True, convergence information is printed. For the default
            l_bfgs_b solver, disp controls the frequency of the output during
            the iterations. disp < 0 means no output in this case.
        callback : function, optional
            Called after each iteration as callback(xk) where xk is the current
            parameter vector.
        kwargs
            See Notes for keyword arguments that can be passed to fit.

        Returns
        -------
        `statsmodels.tsa.arima.ARIMAResults` class

        See also
        --------
        statsmodels.base.model.LikelihoodModel.fit : for more information
            on using the solvers.
        ARIMAResults : results class returned by fit

        Notes
        ------
        If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unkown state is zero, and that the inital variance is
        P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
        r, order = 'F')
        """
        # fit the ARMA model on the differenced series, then re-wrap the
        # results in ARIMA-specific result classes
        arima_fit = super(ARIMA, self).fit(start_params, trend,
                                           method, transparams, solver,
                                           maxiter, full_output, disp,
                                           callback, **kwargs)
        normalized_cov_params = None  # TODO: fix this?
        arima_fit = ARIMAResults(self, arima_fit._results.params,
                                 normalized_cov_params)
        arima_fit.k_diff = self.k_diff
        return ARIMAResultsWrapper(arima_fit)
    def predict(self, params, start=None, end=None, exog=None, typ='linear',
                dynamic=False):
        # go ahead and convert to an index for easier checking
        if isinstance(start, (string_types, datetime)):
            start = _index_date(start, self.data.dates)
        if typ == 'linear':
            # prediction of the differenced series
            if not dynamic or (start != self.k_ar + self.k_diff and
                               start is not None):
                return super(ARIMA, self).predict(params, start, end, exog,
                                                  dynamic)
            else:
                # need to assume pre-sample residuals are zero
                # do this by a hack
                q = self.k_ma
                self.k_ma = 0
                predictedvalues = super(ARIMA, self).predict(params, start,
                                                             end, exog,
                                                             dynamic)
                self.k_ma = q
                return predictedvalues
        elif typ == 'levels':
            # prediction in the levels of the original (undifferenced) series
            endog = self.data.endog
            if not dynamic:
                predict = super(ARIMA, self).predict(params, start, end,
                                                     dynamic)
                start = self._get_predict_start(start, dynamic)
                end, out_of_sample = self._get_predict_end(end)
                d = self.k_diff
                if 'mle' in self.method:
                    start += d - 1  # for case where d == 2
                    end += d - 1
                # add each predicted diff to lagged endog
                if out_of_sample:
                    fv = predict[:-out_of_sample] + endog[start:end+1]
                    if d == 2:  #TODO: make a general solution to this
                        fv += np.diff(endog[start - 1:end + 1])
                    # un-difference the out-of-sample forecasts from the
                    # last d observed levels
                    levels = unintegrate_levels(endog[-d:], d)
                    fv = np.r_[fv,
                               unintegrate(predict[-out_of_sample:],
                                           levels)[d:]]
                else:
                    fv = predict + endog[start:end + 1]
                    if d == 2:
                        fv += np.diff(endog[start - 1:end + 1])
            else:
                k_ar = self.k_ar
                if out_of_sample:
                    fv = (predict[:-out_of_sample] +
                          endog[max(start, self.k_ar-1):end+k_ar+1])
                    if d == 2:
                        fv += np.diff(endog[start - 1:end + 1])
                    levels = unintegrate_levels(endog[-d:], d)
                    fv = np.r_[fv,
                               unintegrate(predict[-out_of_sample:],
                                           levels)[d:]]
                else:
                    fv = predict + endog[max(start, k_ar):end+k_ar+1]
                    if d == 2:
                        fv += np.diff(endog[start - 1:end + 1])
            else:
                #IFF we need to use pre-sample values assume pre-sample
                # residuals are zero, do this by a hack
                if start == self.k_ar + self.k_diff or start is None:
                    # do the first k_diff+1 separately
                    p = self.k_ar
                    q = self.k_ma
                    k_exog = self.k_exog
                    k_trend = self.k_trend
                    k_diff = self.k_diff
                    (trendparam, exparams,
                     arparams, maparams) = _unpack_params(params, (p, q),
                                                          k_trend,
                                                          k_exog,
                                                          reverse=True)
                    # this is the hack
                    self.k_ma = 0
                    predict = super(ARIMA, self).predict(params, start, end,
                                                         exog, dynamic)
                    if not start:
                        start = self._get_predict_start(start, dynamic)
                        start += k_diff
                    self.k_ma = q
                    # cumulate the differenced predictions onto the last
                    # pre-prediction level
                    return endog[start-1] + np.cumsum(predict)
                else:
                    predict = super(ARIMA, self).predict(params, start, end,
                                                         exog, dynamic)
                    return endog[start-1] + np.cumsum(predict)
            return fv
        else:  # pragma : no cover
            raise ValueError("typ %s not understood" % typ)
    predict.__doc__ = _arima_predict
class ARMAResults(tsbase.TimeSeriesModelResults):
    """
    Class to hold results from fitting an ARMA model.

    Parameters
    ----------
    model : ARMA instance
        The fitted model instance
    params : array
        Fitted parameters
    normalized_cov_params : array, optional
        The normalized variance covariance matrix
    scale : float, optional
        Optional argument to scale the variance covariance matrix.

    Returns
    --------
    **Attributes**

    aic : float
        Akaike Information Criterion
        :math:`-2*llf+2* df_model`
        where `df_model` includes all AR parameters, MA parameters, constant
        terms parameters on constant terms and the variance.
    arparams : array
        The parameters associated with the AR coefficients in the model.
    arroots : array
        The roots of the AR coefficients are the solution to
        (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
        Stability requires that the roots in modulus lie outside the unit
        circle.
    bic : float
        Bayes Information Criterion
        -2*llf + log(nobs)*df_model
        Where if the model is fit using conditional sum of squares, the
        number of observations `nobs` does not include the `p` pre-sample
        observations.
    bse : array
        The standard errors of the parameters. These are computed using the
        numerical Hessian.
    df_model : array
        The model degrees of freedom = `k_exog` + `k_trend` + `k_ar` + `k_ma`
    df_resid : array
        The residual degrees of freedom = `nobs` - `df_model`
    fittedvalues : array
        The predicted values of the model.
    hqic : float
        Hannan-Quinn Information Criterion
        -2*llf + 2*(`df_model`)*log(log(nobs))
        Like `bic` if the model is fit using conditional sum of squares then
        the `k_ar` pre-sample observations are not counted in `nobs`.
    k_ar : int
        The number of AR coefficients in the model.
    k_exog : int
        The number of exogenous variables included in the model. Does not
        include the constant.
    k_ma : int
        The number of MA coefficients.
    k_trend : int
        This is 0 for no constant or 1 if a constant is included.
    llf : float
        The value of the log-likelihood function evaluated at `params`.
    maparams : array
        The value of the moving average coefficients.
    maroots : array
        The roots of the MA coefficients are the solution to
        (1 + maparams[0]*z + maparams[1]*z**2 + ... + maparams[q-1]*z**q) = 0
        Stability requires that the roots in modules lie outside the unit
        circle.
    model : ARMA instance
        A reference to the model that was fit.
    nobs : float
        The number of observations used to fit the model. If the model is fit
        using exact maximum likelihood this is equal to the total number of
        observations, `n_totobs`. If the model is fit using conditional
        maximum likelihood this is equal to `n_totobs` - `k_ar`.
    n_totobs : float
        The total number of observations for `endog`. This includes all
        observations, even pre-sample values if the model is fit using `css`.
    params : array
        The parameters of the model. The order of variables is the trend
        coefficients and the `k_exog` exognous coefficients, then the
        `k_ar` AR coefficients, and finally the `k_ma` MA coefficients.
    pvalues : array
        The p-values associated with the t-values of the coefficients. Note
        that the coefficients are assumed to have a Student's T distribution.
    resid : array
        The model residuals. If the model is fit using 'mle' then the
        residuals are created via the Kalman Filter. If the model is fit
        using 'css' then the residuals are obtained via `scipy.signal.lfilter`
        adjusted such that the first `k_ma` residuals are zero. These zero
        residuals are not returned.
    scale : float
        This is currently set to 1.0 and not used by the model or its results.
    sigma2 : float
        The variance of the residuals. If the model is fit by 'css',
        sigma2 = ssr/nobs, where ssr is the sum of squared residuals. If
        the model is fit by 'mle', then sigma2 = 1/nobs * sum(v**2 / F)
        where v is the one-step forecast error and F is the forecast error
        variance. See `nobs` for the difference in definitions depending on the
        fit.
    """
    _cache = {}
    #TODO: use this for docstring when we fix nobs issue
    def __init__(self, model, params, normalized_cov_params=None, scale=1.):
        super(ARMAResults, self).__init__(model, params, normalized_cov_params,
                                          scale)
        self.sigma2 = model.sigma2
        nobs = model.nobs
        self.nobs = nobs
        k_exog = model.k_exog
        self.k_exog = k_exog
        k_trend = model.k_trend
        self.k_trend = k_trend
        k_ar = model.k_ar
        self.k_ar = k_ar
        self.n_totobs = len(model.endog)
        k_ma = model.k_ma
        self.k_ma = k_ma
        df_model = k_exog + k_trend + k_ar + k_ma
        # +1 accounts for the estimated residual variance in the IC formulas
        self._ic_df_model = df_model + 1
        self.df_model = df_model
        self.df_resid = self.nobs - df_model
        self._cache = resettable_cache()
    @cache_readonly
    def arroots(self):
        # inverse roots of the AR lag polynomial
        return np.roots(np.r_[1, -self.arparams])**-1
    @cache_readonly
    def maroots(self):
        # inverse roots of the MA lag polynomial
        return np.roots(np.r_[1, self.maparams])**-1
    @cache_readonly
    def arfreq(self):
        r"""
        Returns the frequency of the AR roots.

        This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
        roots.
        """
        z = self.arroots
        if not z.size:
            return
        return np.arctan2(z.imag, z.real) / (2*pi)
    @cache_readonly
    def mafreq(self):
        r"""
        Returns the frequency of the MA roots.

        This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
        roots.
        """
        z = self.maroots
        if not z.size:
            return
        return np.arctan2(z.imag, z.real) / (2*pi)
    @cache_readonly
    def arparams(self):
        # AR coefficients follow the trend/exog block in params
        k = self.k_exog + self.k_trend
        return self.params[k:k+self.k_ar]
    @cache_readonly
    def maparams(self):
        # MA coefficients are the trailing block of params
        k = self.k_exog + self.k_trend
        k_ar = self.k_ar
        return self.params[k+k_ar:]
    @cache_readonly
    def llf(self):
        return self.model.loglike(self.params)
    @cache_readonly
    def bse(self):
        # standard errors from the numerical Hessian
        params = self.params
        hess = self.model.hessian(params)
        if len(params) == 1:  # can't take an inverse, ensure 1d
            return np.sqrt(-1./hess[0])
        return np.sqrt(np.diag(-inv(hess)))
    def cov_params(self):  # add scale argument?
        params = self.params
        hess = self.model.hessian(params)
        return -inv(hess)
    @cache_readonly
    def aic(self):
        return -2 * self.llf + 2 * self._ic_df_model
    @cache_readonly
    def bic(self):
        nobs = self.nobs
        return -2 * self.llf + np.log(nobs) * self._ic_df_model
    @cache_readonly
    def hqic(self):
        nobs = self.nobs
        return -2 * self.llf + 2 * np.log(np.log(nobs)) * self._ic_df_model
    @cache_readonly
    def fittedvalues(self):
        model = self.model
        endog = model.endog.copy()
        k_ar = self.k_ar
        exog = model.exog  # this is a copy
        # NOTE(review): the trimmed `exog` is currently unused below because
        # the deterministic-part addition is commented out; kept for when
        # that is re-enabled.
        if exog is not None:
            if model.method == "css" and k_ar > 0:
                exog = exog[k_ar:]
        if model.method == "css" and k_ar > 0:
            endog = endog[k_ar:]
        fv = endog - self.resid
        # add deterministic part back in
        #k = self.k_exog + self.k_trend
        #TODO: this needs to be commented out for MLE with constant
        #if k != 0:
        #    fv += dot(exog, self.params[:k])
        return fv
    @cache_readonly
    def resid(self):
        return self.model.geterrors(self.params)
    @cache_readonly
    def pvalues(self):
        #TODO: same for conditional and unconditional?
        df_resid = self.df_resid
        return t.sf(np.abs(self.tvalues), df_resid) * 2
    def predict(self, start=None, end=None, exog=None, dynamic=False):
        return self.model.predict(self.params, start, end, exog, dynamic)
    predict.__doc__ = _arma_results_predict
    def _forecast_error(self, steps):
        # forecast std errors from the psi weights of the MA representation
        sigma2 = self.sigma2
        ma_rep = arma2ma(np.r_[1, -self.arparams],
                         np.r_[1, self.maparams], nobs=steps)
        fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep**2))
        return fcasterr
    def _forecast_conf_int(self, forecast, fcasterr, alpha):
        # Gaussian (1 - alpha) interval around the point forecasts
        const = norm.ppf(1 - alpha / 2.)
        conf_int = np.c_[forecast - const * fcasterr,
                         forecast + const * fcasterr]
        return conf_int
    def forecast(self, steps=1, exog=None, alpha=.05):
        """
        Out-of-sample forecasts

        Parameters
        ----------
        steps : int
            The number of out of sample forecasts from the end of the
            sample.
        exog : array
            If the model is an ARMAX, you must provide out of sample
            values for the exogenous variables. This should not include
            the constant.
        alpha : float
            The confidence intervals for the forecasts are (1 - alpha) %

        Returns
        -------
        forecast : array
            Array of out of sample forecasts
        stderr : array
            Array of the standard error of the forecasts.
        conf_int : array
            2d array of the confidence interval for the forecast
        """
        if exog is not None:
            #TODO: make a convenience function for this. we're using the
            # pattern elsewhere in the codebase
            exog = np.asarray(exog)
            if self.k_exog == 1 and exog.ndim == 1:
                exog = exog[:, None]
            elif exog.ndim == 1:
                if len(exog) != self.k_exog:
                    raise ValueError("1d exog given and len(exog) != k_exog")
                exog = exog[None, :]
            if exog.shape[0] != steps:
                raise ValueError("new exog needed for each step")
            # prepend in-sample exog observations
            exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
                              exog))
        forecast = _arma_predict_out_of_sample(self.params,
                                               steps, self.resid, self.k_ar,
                                               self.k_ma, self.k_trend,
                                               self.k_exog, self.model.endog,
                                               exog, method=self.model.method)
        # compute the standard errors
        fcasterr = self._forecast_error(steps)
        conf_int = self._forecast_conf_int(forecast, fcasterr, alpha)
        return forecast, fcasterr, conf_int
    def summary(self, alpha=.05):
        """Summarize the Model

        Parameters
        ----------
        alpha : float, optional
            Significance level for the confidence intervals.

        Returns
        -------
        smry : Summary instance
            This holds the summary table and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary
        """
        from statsmodels.iolib.summary import Summary
        model = self.model
        title = model.__class__.__name__ + ' Model Results'
        method = model.method
        # get sample TODO: make better sample machinery for estimation
        k_diff = getattr(self, 'k_diff', 0)
        if 'mle' in method:
            start = k_diff
        else:
            start = k_diff + self.k_ar
        if self.data.dates is not None:
            dates = self.data.dates
            sample = [dates[start].strftime('%m-%d-%Y')]
            sample += ['- ' + dates[-1].strftime('%m-%d-%Y')]
        else:
            # BUG FIX: was a plain str, so sample[0]/sample[1] below indexed
            # single characters; must be a 2-element list like the dates case
            sample = [str(start), ' - ' + str(len(self.data.orig_endog))]
        k_ar, k_ma = self.k_ar, self.k_ma
        if not k_diff:
            order = str((k_ar, k_ma))
        else:
            order = str((k_ar, k_diff, k_ma))
        top_left = [('Dep. Variable:', None),
                    ('Model:', [model.__class__.__name__ + order]),
                    ('Method:', [method]),
                    ('Date:', None),
                    ('Time:', None),
                    ('Sample:', [sample[0]]),
                    ('', [sample[1]])
                    ]
        top_right = [
            ('No. Observations:', [str(len(self.model.endog))]),
            ('Log Likelihood', ["%#5.3f" % self.llf]),
            ('S.D. of innovations', ["%#5.3f" % self.sigma2**.5]),
            ('AIC', ["%#5.3f" % self.aic]),
            ('BIC', ["%#5.3f" % self.bic]),
            ('HQIC', ["%#5.3f" % self.hqic])]
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                             title=title)
        smry.add_table_params(self, alpha=alpha, use_t=False)
        # Make the roots table
        from statsmodels.iolib.table import SimpleTable
        if k_ma and k_ar:
            arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
            mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
            stubs = arstubs + mastubs
            roots = np.r_[self.arroots, self.maroots]
            freq = np.r_[self.arfreq, self.mafreq]
        elif k_ma:
            mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
            stubs = mastubs
            roots = self.maroots
            freq = self.mafreq
        elif k_ar:
            arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
            stubs = arstubs
            roots = self.arroots
            freq = self.arfreq
        else:  # 0,0 model
            stubs = []
        if len(stubs):  # not 0, 0
            modulus = np.abs(roots)
            data = np.column_stack((roots.real, roots.imag, modulus, freq))
            roots_table = SimpleTable(data,
                                      headers=['            Real',
                                               '         Imaginary',
                                               '         Modulus',
                                               '        Frequency'],
                                      title="Roots",
                                      stubs=stubs,
                                      data_fmts=["%17.4f", "%+17.4fj",
                                                 "%17.4f", "%17.4f"])
            smry.tables.append(roots_table)
        return smry
    def summary2(self, title=None, alpha=.05, float_format="%.4f"):
        """Experimental summary function for ARIMA Results

        Parameters
        -----------
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals
        float_format: string
            print format for floats in parameters summary

        Returns
        -------
        smry : Summary instance
            This holds the summary table and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary2.Summary : class to hold summary
            results
        """
        from pandas import DataFrame
        # get sample TODO: make better sample machinery for estimation
        k_diff = getattr(self, 'k_diff', 0)
        if 'mle' in self.model.method:
            start = k_diff
        else:
            start = k_diff + self.k_ar
        if self.data.dates is not None:
            dates = self.data.dates
            sample = [dates[start].strftime('%m-%d-%Y')]
            sample += [dates[-1].strftime('%m-%d-%Y')]
        else:
            # BUG FIX: was a plain str, so sample[0]/sample[-1] below indexed
            # single characters; must be a 2-element list like the dates case
            sample = [str(start), str(len(self.data.orig_endog))]
        k_ar, k_ma = self.k_ar, self.k_ma
        # Roots table
        if k_ma and k_ar:
            arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
            mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
            stubs = arstubs + mastubs
            roots = np.r_[self.arroots, self.maroots]
            freq = np.r_[self.arfreq, self.mafreq]
        elif k_ma:
            mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
            stubs = mastubs
            roots = self.maroots
            freq = self.mafreq
        elif k_ar:
            arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
            stubs = arstubs
            roots = self.arroots
            freq = self.arfreq
        else:  # 0, 0 order
            stubs = []
        if len(stubs):
            modulus = np.abs(roots)
            data = np.column_stack((roots.real, roots.imag, modulus, freq))
            data = DataFrame(data)
            data.columns = ['Real', 'Imaginary', 'Modulus', 'Frequency']
            data.index = stubs
        # Summary
        from statsmodels.iolib import summary2
        smry = summary2.Summary()
        # Model info
        model_info = summary2.summary_model(self)
        model_info['Method:'] = self.model.method
        model_info['Sample:'] = sample[0]
        model_info['   '] = sample[-1]
        model_info['S.D. of innovations:'] = "%#5.3f" % self.sigma2**.5
        model_info['HQIC:'] = "%#5.3f" % self.hqic
        model_info['No. Observations:'] = str(len(self.model.endog))
        # Parameters
        params = summary2.summary_params(self)
        smry.add_dict(model_info)
        smry.add_df(params, float_format=float_format)
        if len(stubs):
            smry.add_df(data, float_format="%17.4f")
        smry.add_title(results=self, title=title)
        return smry
    def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
                     alpha=.05, plot_insample=True, ax=None):
        from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
        _ = _import_mpl()
        fig, ax = create_mpl_ax(ax)
        # use predict so you set dates
        forecast = self.predict(start, end, exog, dynamic)
        # doing this twice. just add a plot keyword to predict?
        start = self.model._get_predict_start(start, dynamic=False)
        end, out_of_sample = self.model._get_predict_end(end, dynamic=False)
        if out_of_sample:
            steps = out_of_sample
            fc_error = self._forecast_error(steps)
            conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
                                               alpha)
        if hasattr(self.data, "predict_dates"):
            from pandas import TimeSeries
            forecast = TimeSeries(forecast, index=self.data.predict_dates)
            ax = forecast.plot(ax=ax, label='forecast')
        else:
            ax.plot(forecast)
        x = ax.get_lines()[-1].get_xdata()
        if out_of_sample:
            label = "{0:.0%} confidence interval".format(1 - alpha)
            ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
                            color='gray', alpha=.5, label=label)
        if plot_insample:
            ax.plot(x[:end + 1 - start], self.model.endog[start:end+1],
                    label=self.model.endog_names)
        ax.legend(loc='best')
        return fig
    plot_predict.__doc__ = _plot_predict
class ARMAResultsWrapper(wrap.ResultsWrapper):
    """Results wrapper for ARMAResults.

    Adds no attribute/method wrapping of its own (both dicts are empty);
    it just inherits the TimeSeriesResultsWrapper defaults.
    """
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
                                     _methods)
# Attach the wrapping machinery to ARMAResults.
wrap.populate_wrapper(ARMAResultsWrapper, ARMAResults)
class ARIMAResults(ARMAResults):
    def predict(self, start=None, end=None, exog=None, typ='linear',
                dynamic=False):
        # Docstring is attached from _arima_results_predict below, so no
        # docstring here; `typ` selects 'linear' (differenced) vs 'levels'.
        return self.model.predict(self.params, start, end, exog, typ, dynamic)
    predict.__doc__ = _arima_results_predict
    def _forecast_error(self, steps):
        # Forecast std errors for the *integrated* (levels) series: cumulate
        # the MA representation k_diff times before summing squared weights.
        sigma2 = self.sigma2
        ma_rep = arma2ma(np.r_[1, -self.arparams],
                         np.r_[1, self.maparams], nobs=steps)
        fcerr = np.sqrt(np.cumsum(cumsum_n(ma_rep, self.k_diff)**2)*sigma2)
        return fcerr
    def _forecast_conf_int(self, forecast, fcerr, alpha):
        # Symmetric normal-quantile interval around the point forecast.
        const = norm.ppf(1 - alpha/2.)
        conf_int = np.c_[forecast - const*fcerr, forecast + const*fcerr]
        return conf_int
    def forecast(self, steps=1, exog=None, alpha=.05):
        """
        Out-of-sample forecasts

        Parameters
        ----------
        steps : int
            The number of out of sample forecasts from the end of the
            sample.
        exog : array
            If the model is an ARIMAX, you must provide out of sample
            values for the exogenous variables. This should not include
            the constant.
        alpha : float
            The confidence intervals for the forecasts are (1 - alpha) %

        Returns
        -------
        forecast : array
            Array of out of sample forecasts
        stderr : array
            Array of the standard error of the forecasts.
        conf_int : array
            2d array of the confidence interval for the forecast

        Notes
        -----
        Prediction is done in the levels of the original endogenous variable.
        If you would like prediction of differences in levels use `predict`.
        """
        if exog is not None:
            # Promote a 1d exog to a column when there is a single regressor.
            if self.k_exog == 1 and exog.ndim == 1:
                exog = exog[:, None]
            if exog.shape[0] != steps:
                raise ValueError("new exog needed for each step")
            # prepend in-sample exog observations
            exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
                              exog))
        forecast = _arma_predict_out_of_sample(self.params, steps, self.resid,
                                               self.k_ar, self.k_ma,
                                               self.k_trend, self.k_exog,
                                               self.model.endog,
                                               exog, method=self.model.method)
        # Undo the differencing: rebuild levels from the last d observations,
        # then drop the d seed values that unintegrate prepends.
        d = self.k_diff
        endog = self.model.data.endog[-d:]
        forecast = unintegrate(forecast, unintegrate_levels(endog, d))[d:]
        # get forecast errors
        fcerr = self._forecast_error(steps)
        conf_int = self._forecast_conf_int(forecast, fcerr, alpha)
        return forecast, fcerr, conf_int
    def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
                     alpha=.05, plot_insample=True, ax=None):
        # Docstring attached from _arima_plot_predict below.
        from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
        _ = _import_mpl()  # fail early if matplotlib is absent
        fig, ax = create_mpl_ax(ax)
        # use predict so you set dates
        forecast = self.predict(start, end, exog, 'levels', dynamic)
        # doing this twice. just add a plot keyword to predict?
        start = self.model._get_predict_start(start, dynamic=dynamic)
        end, out_of_sample = self.model._get_predict_end(end, dynamic=dynamic)
        if out_of_sample:
            steps = out_of_sample
            fc_error = self._forecast_error(steps)
            conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
                                               alpha)
        if hasattr(self.data, "predict_dates"):
            # Pandas-backed model: plot against the real date index.
            from pandas import TimeSeries
            forecast = TimeSeries(forecast, index=self.data.predict_dates)
            ax = forecast.plot(ax=ax, label='forecast')
        else:
            ax.plot(forecast)
        x = ax.get_lines()[-1].get_xdata()
        if out_of_sample:
            label = "{0:.0%} confidence interval".format(1 - alpha)
            ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
                            color='gray', alpha=.5, label=label)
        if plot_insample:
            import re
            k_diff = self.k_diff
            # Strip the differencing prefix (e.g. "D.y") from the label.
            label = re.sub("D\d*\.", "", self.model.endog_names)
            # Plot the observed series in levels, not differences.
            levels = unintegrate(self.model.endog,
                                 self.model._first_unintegrate)
            ax.plot(x[:end + 1 - start],
                    levels[start + k_diff:end + k_diff + 1], label=label)
        ax.legend(loc='best')
        return fig
    plot_predict.__doc__ = _arima_plot_predict
class ARIMAResultsWrapper(ARMAResultsWrapper):
    """Same wrapping rules as ARMAResultsWrapper, registered for ARIMAResults."""
    pass
wrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults)
if __name__ == "__main__":
    # Smoke-test script exercising the ARMA/ARIMA estimation paths.
    import statsmodels.api as sm
    # simulate arma process
    from statsmodels.tsa.arima_process import arma_generate_sample
    y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)
    arma = ARMA(y)
    res = arma.fit(trend='nc', order=(1, 1))
    np.random.seed(12345)
    y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],
                                    nsample=1000)
    arma22 = ARMA(y_arma22)
    res22 = arma22.fit(trend='nc', order=(2, 2))
    # test CSS
    arma22_css = ARMA(y_arma22)
    res22css = arma22_css.fit(trend='nc', order=(2, 2), method='css')
    data = sm.datasets.sunspots.load()
    ar = ARMA(data.endog)
    resar = ar.fit(trend='nc', order=(9, 0))
    y_arma31 = arma_generate_sample([1, -.75, -.35, .25], [.1],
                                    nsample=1000)
    arma31css = ARMA(y_arma31)
    res31css = arma31css.fit(order=(3, 1), method="css", trend="nc",
                             transparams=True)
    y_arma13 = arma_generate_sample([1., -.75], [1, .25, -.5, .8],
                                    nsample=1000)
    arma13css = ARMA(y_arma13)
    res13css = arma13css.fit(order=(1, 3), method='css', trend='nc')
    # check css for p < q and q < p
    y_arma41 = arma_generate_sample([1., -.75, .35, .25, -.3], [1, -.35],
                                    nsample=1000)
    arma41css = ARMA(y_arma41)
    res41css = arma41css.fit(order=(4, 1), trend='nc', method='css')
    y_arma14 = arma_generate_sample([1, -.25], [1., -.75, .35, .25, -.3],
                                    nsample=1000)
    arma14css = ARMA(y_arma14)
    # BUG FIX: y_arma14 is an ARMA(1, 4) series; the original fit used
    # order=(4, 1) (copy-paste from the arma41 case above), so the q > p
    # half of the "p < q and q < p" CSS check was never exercised.
    res14css = arma14css.fit(order=(1, 4), trend='nc', method='css')
    # ARIMA Model
    from statsmodels.datasets import webuse
    dta = webuse('wpi1')
    wpi = dta['wpi']
    mod = ARIMA(wpi, (1, 1, 1)).fit()
|
metinsay/docluster | refs/heads/master | docluster/models/classification/perceptron.py | 1 | import numpy as np
class Perceptron(object):
    """Binary perceptron classifier.

    Parameters
    ----------
    n_iterations : int
        Number of full passes over the training data.
    kind : str
        'standard' for the classic perceptron; 'average' for the averaged
        perceptron, which returns the mean of all intermediate weight
        vectors and is less sensitive to the ordering of the data.
    """

    def __init__(self, n_iterations=10, kind='standard'):
        self.n_iterations = n_iterations
        self.kind = kind

    def train(self, data, labels):
        """Fit the perceptron on ``data`` (n_data x n_features) and ``labels``.

        Labels are assumed to be in {-1, +1} (the update rule at
        ``_update_weights`` relies on ``label * score`` margins).

        Returns
        -------
        (weights, offset) : the learned decision-function parameters.

        Raises
        ------
        ValueError
            If ``kind`` is neither 'standard' nor 'average'.
        """
        n_data, n_features = data.shape
        weights = np.zeros(n_features)
        offset = 0
        if self.kind == 'standard':
            for _ in range(self.n_iterations):
                for feature, label in zip(data, labels):
                    (weights, offset) = self._update_weights(
                        weights, offset, feature, label)
            self.weights, self.offset = weights, offset
        elif self.kind == 'average':
            sum_weights = np.zeros(n_features)
            sum_offset = 0.
            for _ in range(self.n_iterations):
                for feature, label in zip(data, labels):
                    # BUG FIX: arguments were passed as (feature, label,
                    # weights, offset) against the signature (weights, offset,
                    # feature, label), and the accumulator was the undefined
                    # name `sum_theta`, so this branch always raised.
                    (weights, offset) = self._update_weights(
                        weights, offset, feature, label)
                    sum_weights = np.add(sum_weights, weights)
                    sum_offset += offset
            n_terms = n_data * self.n_iterations
            self.weights = sum_weights / n_terms
            self.offset = sum_offset / n_terms
        else:
            # BUG FIX: was a silent `None`, which fell through to an
            # AttributeError on the return below; fail loudly instead.
            raise ValueError(
                "kind must be 'standard' or 'average', got %r" % (self.kind,))
        return self.weights, self.offset

    def _update_weights(self, weights, offset, feature, label):
        """Single perceptron step: update only on a margin violation."""
        if label * (np.dot(feature, weights) + offset) <= 0:
            weights = np.add(weights, label * feature)
            offset = offset + label
        return (weights, offset)

    def fit(self, data):
        """Label each row of ``data`` using the trained parameters.

        NOTE(review): returns 1 where ``w.x + offset <= 0`` -- the inverse of
        the training convention; preserved as-is since callers may rely on it.
        """
        n_data = data.shape[0]
        tiled_weights = np.tile(self.weights, (n_data, 1))
        # BUG FIX: removed leftover debug print of the full weight matrix.
        evaled_data = np.einsum('ij,ij->i', tiled_weights, data) + self.offset
        return (evaled_data <= 0).astype('int64')
|
ivh/VAMDC-VALD | refs/heads/master | nodes/ethylene/node/urls.py | 59 | # Optional:
# Use this file to connect views from views.py in the same
# directory to their URLs.
#from django.conf.urls.defaults import *
#from django.conf import settings
#urlpatterns = patterns(settings.NODENAME+'.node.views',
# (r'^$', 'index'),
# )
|
HalcyonChimera/osf.io | refs/heads/develop | addons/figshare/models.py | 14 | # -*- coding: utf-8 -*-
import markupsafe
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth import Auth
from framework.exceptions import HTTPError
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from addons.base import exceptions
from addons.figshare import settings as figshare_settings
from addons.figshare import messages
from addons.figshare.client import FigshareClient
from addons.figshare.serializer import FigshareSerializer
class FigshareFileNode(BaseFileNode):
    """Base class for figshare file-tree entries; tags them with the provider name."""
    _provider = 'figshare'
class FigshareFolder(FigshareFileNode, Folder):
    """Folder entry within a linked figshare container."""
    pass
class FigshareFile(FigshareFileNode, File):
    """File entry in a linked figshare container."""
    # figshare addresses file versions by 'ref' rather than a version id.
    version_identifier = 'ref'
    @property
    def _hashes(self):
        # figshare API doesn't provide this metadata
        return None
    def update(self, revision, data, user=None, save=True):
        """Figshare does not support versioning.
        Always pass revision as None to avoid conflict.
        Call super to update _history and last_touched anyway.

        Returns either the version record or, for draft files, a
        (version, html-message) tuple used to suppress rendering.
        """
        version = super(FigshareFile, self).update(None, data, user=user, save=save)
        # Draft files are not renderable
        if data['extra']['status'] == 'drafts':
            return (version, u"""
            <style>
            .file-download{{display: none;}}
            .file-share{{display: none;}}
            </style>
            <div class="alert alert-info" role="alert">
            The file "{name}" is still a draft on figshare. <br>
            To view it on the OSF
            <a href="https://support.figshare.com/support/solutions">publish</a>
            it on figshare.
            </div>
            """.format(name=markupsafe.escape(self.name)))
        return version
class FigshareProvider(ExternalProvider):
    """OAuth2 provider definition for figshare."""
    name = 'figshare'
    short_name = 'figshare'
    client_id = figshare_settings.CLIENT_ID
    client_secret = figshare_settings.CLIENT_SECRET
    auth_url_base = figshare_settings.FIGSHARE_OAUTH_AUTH_ENDPOINT
    callback_url = figshare_settings.FIGSHARE_OAUTH_TOKEN_ENDPOINT
    # Tokens are refreshed against the same endpoint as the initial exchange.
    auto_refresh_url = callback_url
    # refresh_time = settings.REFRESH_TIME  # TODO: maybe
    # expiry_time = settings.EXPIRY_TIME
    default_scopes = ['all']
    def handle_callback(self, response):
        """View called when the Oauth flow is completed. Builds the external
        account info (provider id and display name) from the user's figshare
        profile, using the access token just obtained.
        """
        client = FigshareClient(response['access_token'])
        about = client.userinfo()
        return {
            'provider_id': about['id'],
            'display_name': u'{} {}'.format(about['first_name'], about.get('last_name')),
        }
class UserSettings(BaseOAuthUserSettings):
    """Stores user-specific figshare OAuth information.
    """
    oauth_provider = FigshareProvider
    serializer = FigshareSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
    """Per-node figshare configuration: the linked container and its OAuth link."""
    oauth_provider = FigshareProvider
    serializer = FigshareSerializer
    # Linked figshare container: numeric id, display name, and its kind
    # ('article', 'fileset' or 'project') stored in folder_path.
    folder_id = models.TextField(blank=True, null=True)
    folder_name = models.TextField(blank=True, null=True)
    folder_path = models.TextField(blank=True, null=True)
    user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
    _api = None  # lazily-created FigshareProvider; see `api`
    @property
    def api(self):
        """authenticated ExternalProvider instance"""
        if self._api is None:
            self._api = FigshareProvider(self.external_account)
        return self._api
    def fetch_folder_name(self):
        """Return a '<name>:<id>' display label for the linked container."""
        return u'{0}:{1}'.format(self.folder_name or 'Unnamed {0}'.format(self.folder_path or ''), self.folder_id)
    def fetch_full_folder_path(self):
        """Return the folder name; figshare containers have no deeper path."""
        return self.folder_name
    def get_folders(self, **kwargs):
        """List the linkable figshare containers for the authorized account."""
        return FigshareClient(self.external_account.oauth_key).get_folders()
    def archive_errors(self):
        """Return a warning string if the linked container holds non-public
        content that cannot be copied into a registration; None otherwise.
        """
        items = []
        if self.folder_path in ('article', 'fileset'):
            article = FigshareClient(self.external_account.oauth_key).article(self.folder_id)
            items = [article]
        else:
            project = FigshareClient(self.external_account.oauth_key).project(self.folder_id)
            items = project['articles'] if project else []
        private = any(
            [item for item in items if item['status'].lower() != 'public']
        )
        if private:
            return 'The figshare {folder_path} <strong>{folder_name}</strong> contains private content that we cannot copy to the registration. If this content is made public on figshare we should then be able to copy those files. You can view those files <a href="{url}" target="_blank">here.</a>'.format(
                folder_path=markupsafe.escape(self.folder_path),
                folder_name=markupsafe.escape(self.folder_name),
                url=self.owner.web_url_for('collect_file_trees'))
    def clear_settings(self):
        """Forget the linked container (does not touch the OAuth account)."""
        self.folder_id = None
        self.folder_name = None
        self.folder_path = None
    def deauthorize(self, auth=None, add_log=True):
        """Remove user authorization from this node and log the event."""
        self.clear_settings()
        if add_log:
            self.nodelogger.log(action='node_deauthorized', save=True)
        self.clear_auth()
    def serialize_waterbutler_credentials(self):
        """Return the credentials dict passed to waterbutler; raises
        AddonError if the addon is not authorized.
        """
        if not self.has_auth:
            raise exceptions.AddonError('Addon is not authorized')
        try:
            # FigshareProvider(self.external_account).refresh_oauth_key() # TODO: Maybe
            return {'token': self.external_account.oauth_key}
        except Exception as error: # TODO: specific exception
            raise HTTPError(error.status_code, data={'message_long': error.message})
    def serialize_waterbutler_settings(self):
        """Return the container type/id dict passed to waterbutler; raises
        AddonError if no container is linked.
        """
        if not self.folder_path or not self.folder_id:
            raise exceptions.AddonError('Folder is not configured')
        return {
            'container_type': self.folder_path,
            'container_id': str(self.folder_id),
        }
    def create_waterbutler_log(self, auth, action, metadata):
        """Record a waterbutler file action in the owning node's log."""
        url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider='figshare')
        self.owner.add_log(
            'figshare_{0}'.format(action),
            auth=auth,
            params={
                'project': self.owner.parent_id,
                'node': self.owner._id,
                'path': metadata['materialized'],
                'filename': metadata['materialized'].strip('/'),
                'urls': {
                    'view': url,
                    'download': url + '?action=download'
                },
            },
        )
    def set_folder(self, folder_id, auth):
        """Link this node to the figshare container *folder_id* and log it."""
        try:
            info = FigshareClient(self.external_account.oauth_key).get_linked_folder_info(folder_id)
        except HTTPError as e:
            raise exceptions.InvalidFolderError(e.message)
        self.folder_id = info['id']
        self.folder_name = info['name']
        self.folder_path = info['path']
        self.save()
        self.nodelogger.log(action='folder_selected', save=True)
    #############
    # Callbacks #
    #############
    def after_delete(self, user=None):
        # Deauthorize (with a log entry) when the owning node is deleted.
        self.deauthorize(Auth(user=user), add_log=True)
        self.save()
    def on_delete(self):
        # Silent variant used when no acting user is available.
        self.deauthorize(add_log=False)
        self.save()
    def before_page_load(self, node, user):
        """
        Warn about figshare/OSF permission mismatches before rendering.

        :param Node node:
        :param User user:
        :return str: Alert message
        """
        if not self.configured:
            return []
        figshare = node.get_addon('figshare')
        # Quit if no user authorization
        node_permissions = 'public' if node.is_public else 'private'
        if figshare.folder_path == 'project':
            if node_permissions == 'private':
                message = messages.BEFORE_PAGE_LOAD_PRIVATE_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.folder_id)
                return [message]
            else:
                message = messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.folder_id)
        connect = FigshareClient(self.external_account.oauth_key)
        try:
            project_is_public = connect.container_is_public(self.folder_id, self.folder_path)
        except HTTPError as e:
            if e.code == 403:
                return [messages.OAUTH_INVALID]
            elif e.code == 500:
                return [messages.FIGSHARE_INTERNAL_SERVER_ERROR]
            else:
                return [messages.FIGSHARE_UNSPECIFIED_ERROR.format(error_message=e.message)]
        article_permissions = 'public' if project_is_public else 'private'
        if article_permissions != node_permissions:
            message = messages.BEFORE_PAGE_LOAD_PERM_MISMATCH.format(
                category=node.project_or_component,
                node_perm=node_permissions,
                figshare_perm=article_permissions,
                figshare_id=self.folder_id,
                folder_type=self.folder_path,
            )
            if article_permissions == 'private' and node_permissions == 'public':
                message += messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_PRIVATE_FS.format(folder_type=self.folder_path)
            # No HTML snippets, so escape message all at once
            return [markupsafe.escape(message)]
|
marcelloceschia/asterisk-11-extended_codec | refs/heads/master | res/pjproject/tests/pjsua/scripts-call/300_ice_1_0.py | 3 | # $Id: 300_ice_1_0.py 369517 2012-07-01 17:28:57Z file $
#
from inc_cfg import *
# ICE mismatch
# Scenario: the callee advertises ICE but the caller does not; the call
# should still connect (ICE mismatch handled gracefully).
test_param = TestParam(
		"Callee=use ICE, caller=no ICE",
		[
			InstanceParam("callee", "--null-audio --use-ice --max-calls=1"),
			InstanceParam("caller", "--null-audio --max-calls=1")
		]
	)
|
django-fluent/django-fluent-comments | refs/heads/master | fluent_comments/email.py | 2 | from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from fluent_comments import appsettings
def send_comment_posted(comment, request):
    """
    Send the email to staff (``settings.MANAGERS``) that a comment was posted.

    While the django_comments module has email support,
    it doesn't pass the 'request' to the context.
    This also changes the subject to show the page title.
    """
    recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
    site = get_current_site(request)
    content_object = comment.content_object
    content_title = force_text(content_object)
    # Subject reflects moderation state: removed (spam), held, or public.
    if comment.is_removed:
        subject = u'[{0}] Spam comment on "{1}"'.format(site.name, content_title)
    elif not comment.is_public:
        subject = u'[{0}] Moderated comment on "{1}"'.format(site.name, content_title)
    else:
        subject = u'[{0}] New comment posted on "{1}"'.format(site.name, content_title)
    context = {
        'site': site,
        'comment': comment,
        'content_object': content_object
    }
    message = render_to_string("comments/comment_notification_email.txt", context, request=request)
    # Optionally render an HTML alternative alongside the plain-text body.
    if appsettings.FLUENT_COMMENTS_MULTIPART_EMAILS:
        html_message = render_to_string("comments/comment_notification_email.html", context, request=request)
    else:
        html_message = None
    send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,
              recipient_list, fail_silently=True, html_message=html_message)
|
translate/pootle | refs/heads/master | pootle/apps/pootle_score/providers.py | 7 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from pootle.core.delegate import event_score
from pootle.core.plugin import provider
from pootle_log.utils import LogEvent
from . import scores
@provider(event_score, sender=LogEvent)
def event_log_score_provider(**kwargs_):
    """Map log-event names to their score calculator classes."""
    return {
        'suggestion_created': scores.SuggestionCreatedScore,
        'suggestion_accepted': scores.SuggestionAcceptedScore,
        'suggestion_rejected': scores.SuggestionRejectedScore,
        'target_updated': scores.TargetUpdatedScore,
        'state_updated': scores.StateUpdatedScore,
        'comment_updated': scores.CommentUpdatedScore,
    }
|
yosshy/nova | refs/heads/master | nova/api/openstack/compute/server_usage.py | 23 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
ALIAS = "os-server-usage"
authorize = extensions.os_compute_soft_authorizer(ALIAS)
resp_topic = "OS-SRV-USG"
class ServerUsageController(wsgi.Controller):
    """Extends server show/detail responses with usage timestamps."""
    def __init__(self, *args, **kwargs):
        super(ServerUsageController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
    def _extend_server(self, server, instance):
        """Copy launched_at/terminated_at from the DB instance onto the
        response dict under the OS-SRV-USG namespace.
        """
        for k in ['launched_at', 'terminated_at']:
            key = "%s:%s" % (resp_topic, k)
            # NOTE(danms): Historically, this timestamp has been generated
            # merely by grabbing str(datetime) of a TZ-naive object. The
            # only way we can keep that with instance objects is to strip
            # the tzinfo from the stamp and str() it.
            server[key] = (instance[k].replace(tzinfo=None)
                           if instance[k] else None)
    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Add usage fields to a single-server response, if authorized."""
        context = req.environ['nova.context']
        if authorize(context):
            server = resp_obj.obj['server']
            db_instance = req.get_db_instance(server['id'])
            # server['id'] is guaranteed to be in the cache due to
            # the core API adding it in its 'show' method.
            self._extend_server(server, db_instance)
    @wsgi.extends
    def detail(self, req, resp_obj):
        """Add usage fields to each server in a detail response, if authorized."""
        context = req.environ['nova.context']
        if authorize(context):
            servers = list(resp_obj.obj['servers'])
            for server in servers:
                db_instance = req.get_db_instance(server['id'])
                # server['id'] is guaranteed to be in the cache due to
                # the core API adding it in its 'detail' method.
                self._extend_server(server, db_instance)
class ServerUsage(extensions.V21APIExtensionBase):
    """Adds launched_at and terminated_at on Servers."""
    name = "ServerUsage"
    alias = ALIAS
    version = 1
    def get_controller_extensions(self):
        # Hook the usage controller onto the core 'servers' resource.
        controller = ServerUsageController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
    def get_resources(self):
        # This extension adds no top-level resources of its own.
        return []
|
nan86150/ImageFusion | refs/heads/master | lib/python2.7/site-packages/pip/_vendor/progress/__init__.py | 916 | # Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time
__version__ = '1.2'
class Infinite(object):
    """Progress indicator for tasks of unknown total length."""
    file = stderr
    sma_window = 10  # how many recent step durations feed the moving average
    def __init__(self, *args, **kwargs):
        self.index = 0
        self.start_ts = time()
        self._ts = self.start_ts
        self._dt = deque(maxlen=self.sma_window)
        # Arbitrary keyword arguments become attributes (e.g. message=...).
        for name, value in kwargs.items():
            setattr(self, name, value)
    def __getitem__(self, key):
        # Underscored attributes are private; lookups of them yield None.
        return None if key.startswith('_') else getattr(self, key, None)
    @property
    def avg(self):
        """Simple moving average of the most recent per-item durations."""
        if not self._dt:
            return 0
        return sum(self._dt) / len(self._dt)
    @property
    def elapsed(self):
        """Whole seconds since this indicator was created."""
        return int(time() - self.start_ts)
    @property
    def elapsed_td(self):
        """`elapsed` as a datetime.timedelta."""
        return timedelta(seconds=self.elapsed)
    def update(self):
        """Hook for subclasses: redraw the indicator."""
        pass
    def start(self):
        pass
    def finish(self):
        pass
    def next(self, n=1):
        """Advance the indicator by *n* items and redraw."""
        if n > 0:
            now = time()
            # Record the average per-item duration of this batch.
            self._dt.append((now - self._ts) / n)
            self._ts = now
        self.index = self.index + n
        self.update()
    def iter(self, it):
        """Consume *it*, advancing once per element, then finish."""
        for item in it:
            yield item
            self.next()
        self.finish()
class Progress(Infinite):
    """Progress indicator for tasks with a known total (`max`)."""
    def __init__(self, *args, **kwargs):
        super(Progress, self).__init__(*args, **kwargs)
        self.max = kwargs.get('max', 100)
    @property
    def eta(self):
        """Estimated seconds remaining, from the moving-average step time."""
        return int(ceil(self.avg * self.remaining))
    @property
    def eta_td(self):
        """`eta` as a datetime.timedelta."""
        return timedelta(seconds=self.eta)
    @property
    def percent(self):
        return 100 * self.progress
    @property
    def progress(self):
        # Clamp to 1 in case the caller advances past max.
        return min(1, self.index / self.max)
    @property
    def remaining(self):
        return max(self.max - self.index, 0)
    def start(self):
        self.update()
    def goto(self, index):
        """Jump the indicator straight to *index*."""
        self.next(index - self.index)
    def iter(self, it):
        """Consume *it*, taking its length as `max` when available."""
        try:
            self.max = len(it)
        except TypeError:
            pass
        for item in it:
            yield item
            self.next()
        self.finish()
|
rspavel/spack | refs/heads/develop | var/spack/repos/builtin/packages/logstash/package.py | 5 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Logstash(Package):
    """
    Logstash is part of the Elastic Stack along with Beats, Elasticsearch
    and Kibana. Logstash is a server-side data processing pipeline that
    ingests data from a multitude of sources simultaneously, transforms it,
    and then sends it to your favorite "stash".
    """
    homepage = "https://artifacts.elastic.co"
    url = "https://artifacts.elastic.co/downloads/logstash/logstash-6.6.0.tar.gz"
    version('6.6.0', sha256='5a9a8b9942631e9d4c3dfb8d47075276e8c2cff343841145550cc0c1cfe7bba7')
    def install(self, spec, prefix):
        # Upstream ships a ready-to-run tree; copy it into the prefix verbatim.
        install_tree('.', prefix)
|
ketjow4/NOV | refs/heads/master | Lib/site-packages/scipy/cluster/tests/vq_test.py | 63 | import numpy as np
from scipy.cluster import vq
def python_vq(all_data, code_book):
    """Vector-quantize *all_data* against *code_book* and return the
    single-precision result.

    Runs scipy.cluster.vq.vq twice -- once on the double-precision inputs
    (result discarded, kept to preserve this benchmark's workload) and once
    on float32 copies -- and returns the float32 (codes, distances) pair:
    for each observation, the index of the nearest code and its distance.
    """
    # Double-precision pass. The original timed this and the float pass with
    # time.time() and commented-out prints; that dead debug code is removed.
    vq.vq(all_data, code_book)
    float_obs = all_data.astype(np.float32)
    float_code = code_book.astype(np.float32)
    codes1, dist1 = vq.vq(float_obs, float_code)
    return codes1, dist1
def read_data(name):
    """Read a whitespace-delimited numeric text file into a 2-D NumPy array.

    Each line of the file becomes one row of floats.
    """
    # BUG FIX: the original called the long-removed `string.split`, used an
    # unqualified `array` that was never imported, and leaked the file
    # handle on error. Use a context manager and str.split instead.
    with open(name, 'r') as f:
        data = [[float(tok) for tok in line.split()] for line in f]
    return np.array(data)
def main():
    """Run one vector-quantization benchmark pass on random data."""
    # Fixed seed so repeated runs of this benchmark are comparable.
    np.random.seed((1000,1000))
    Ncodes = 40
    Nfeatures = 16
    Nobs = 4000
    code_book = np.random.normal(0,1,(Ncodes,Nfeatures))
    features = np.random.normal(0,1,(Nobs,Nfeatures))
    # Results are unused; this script only exercises the vq code paths.
    codes,dist = python_vq(features,code_book)
if __name__ == '__main__':
    main()
|
Skeletrox/usb-backend-pinut | refs/heads/master | file_upload/changepermissions/tests.py | 873 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
# Create your tests here.
|
Subsets and Splits