// file: to_str.rs
use ::ffi;
use ::libc::c_char;
use std::ffi::CStr;
// To-string converters
// See http://www.libnfc.org/api/group__string-converter.html
/// Converts nfc_modulation_type value to string
pub fn modulation_type(pnd: ffi::nfc_modulation_type) -> &'static str {
    unsafe {
        CStr::from_ptr(ffi::str_nfc_modulation_type(pnd)).to_str().unwrap()
    }
}
/// Converts nfc_baud_rate value to string
pub fn baud_rate(baud_rate: ffi::nfc_baud_rate) -> &'static str {
    unsafe {
        CStr::from_ptr(ffi::str_nfc_baud_rate(baud_rate)).to_str().unwrap()
    }
}
/// Returns the number of characters printed
pub fn target(buf: *mut *mut c_char, pnt: *mut ffi::nfc_target, verbose: u8) -> i32 {
unsafe { ffi::str_nfc_target(buf, pnt, verbose) }
}
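
// Usage sketch (hypothetical caller, not part of the original file): given a
// `*mut ffi::nfc_target` obtained from a libnfc poll, the converters above can
// render its modulation settings. Field names follow libnfc's C structs
// (nfc_target.nm, nfc_modulation.nmt/.nbr) and may differ in these bindings:
//
//     let nm = unsafe { (*pnt).nm };
//     println!("{} ({})", modulation_type(nm.nmt), baud_rate(nm.nbr));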

# file: settings.py
# Django settings for freudiancommits project.
import os
DEBUG = os.environ.get('DJANGO_DEBUG') == '1'
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
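
# dj_database_url reads the DATABASE_URL environment variable (for example, a
# hypothetical postgres://user:pass@localhost:5432/freudiancommits) and expands
# it into Django's DATABASES dict format.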
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# Don't require email addresses
SOCIALACCOUNT_EMAIL_REQUIRED = False
SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth'
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'freudiancommits.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'freudiancommits.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'freudiancommits.main',
'freudiancommits.github',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'south',
'gunicorn',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
if 'AWS_STORAGE_BUCKET_NAME' in os.environ:
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_CUSTOM_DOMAIN = AWS_STORAGE_BUCKET_NAME
    # s3_folder_storage subclasses S3BotoStorage, adding fixed key prefixes
    DEFAULT_FILE_STORAGE = 's3_folder_storage.s3.DefaultStorage'
DEFAULT_S3_PATH = 'media'
STATICFILES_STORAGE = 's3_folder_storage.s3.StaticStorage'
STATIC_S3_PATH = 'static'
AWS_S3_SECURE_URLS = False
AWS_QUERYSTRING_AUTH = False
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = '//%s/%s/' % \
(AWS_STORAGE_BUCKET_NAME, DEFAULT_S3_PATH)
STATIC_ROOT = '/%s/' % STATIC_S3_PATH
STATIC_URL = '//%s/%s/' % \
(AWS_STORAGE_BUCKET_NAME, STATIC_S3_PATH)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
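    # For example, with AWS_STORAGE_BUCKET_NAME='example-bucket' (hypothetical),
    # this yields STATIC_URL = '//example-bucket/static/' and
    # MEDIA_URL = '//example-bucket/media/'.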
LOGIN_REDIRECT_URL = '/github/loading/'

# file: flags.py
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <[email protected]>
#
import shlex
import selinux
from .util import open # pylint: disable=redefined-builtin
class Flags(object):
def __init__(self):
#
# mode of operation
#
self.testing = False
self.installer_mode = False
#
# minor modes (installer-specific)
#
self.automated_install = False
self.live_install = False
self.image_install = False
#
# enable/disable functionality
#
self.selinux = selinux.is_selinux_enabled()
self.multipath = True
self.dmraid = True
self.ibft = True
self.noiswmd = False
self.gfs2 = True
self.jfs = True
self.reiserfs = True
self.arm_platform = None
self.gpt = False
self.multipath_friendly_names = True
# set to False to suppress the default LVM behavior of saving
# backup metadata in /etc/lvm/{archive,backup}
self.lvm_metadata_backup = True
# whether to include nodev filesystems in the devicetree (only
# meaningful when flags.installer_mode is False)
self.include_nodev = False
self.boot_cmdline = {}
self.update_from_boot_cmdline()
self.allow_imperfect_devices = True
def get_boot_cmdline(self):
buf = open("/proc/cmdline").read().strip()
args = shlex.split(buf)
for arg in args:
(opt, _equals, val) = arg.partition("=")
            # also record bare flags (e.g. "nompath"), otherwise the
            # membership tests in update_from_boot_cmdline() never match them
            self.boot_cmdline[opt] = val if val else None
def update_from_boot_cmdline(self):
self.get_boot_cmdline()
if "nompath" in self.boot_cmdline:
self.multipath = False
if "nodmraid" in self.boot_cmdline:
            self.dmraid = False
        if "noiswmd" in self.boot_cmdline:
self.noiswmd = True
def update_from_anaconda_flags(self, anaconda_flags):
self.installer_mode = True
self.testing = anaconda_flags.testing
self.automated_install = anaconda_flags.automatedInstall
self.live_install = anaconda_flags.livecdInstall
self.image_install = anaconda_flags.imageInstall
self.selinux = anaconda_flags.selinux
self.gfs2 = "gfs2" in self.boot_cmdline
self.jfs = "jfs" in self.boot_cmdline
self.reiserfs = "reiserfs" in self.boot_cmdline
self.arm_platform = anaconda_flags.armPlatform
self.gpt = anaconda_flags.gpt
self.multipath_friendly_names = anaconda_flags.mpathFriendlyNames
self.allow_imperfect_devices = anaconda_flags.rescue_mode
self.ibft = anaconda_flags.ibft
self.dmraid = anaconda_flags.dmraid
# We don't want image installs writing backups of the *image* metadata
# into the *host's* /etc/lvm. This can get real messy on build systems.
if self.image_install:
self.lvm_metadata_backup = False
flags = Flags()
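
# Usage sketch (hypothetical kernel command line): with the cmdline parsing
# above, booting with "root=/dev/sda1 nompath nodmraid" leaves
# flags.multipath == False and flags.dmraid == False, since
# update_from_boot_cmdline() runs in __init__.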

// file: install.js
"use strict";
var sqlite3 = require('sqlite3');
var authHelper = require('./server/helpers/auth');
var db = new sqlite3.Database('./data/users.db');
db.serialize(function() {
db.run(
        'CREATE TABLE "users" ('
            + '"id" INTEGER PRIMARY KEY AUTOINCREMENT,'
+ '"username" TEXT,'
+ '"password" TEXT'
+ ')'
);
db.run("INSERT INTO users('username', 'password') VALUES (?, ?)", ["admin", authHelper.hashPassword("teste")]);
});
db.close();
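
// Verification sketch (hypothetical follow-up script; get() is sqlite3's
// standard single-row query API):
//   new sqlite3.Database('./data/users.db')
//     .get('SELECT username FROM users', (err, row) => console.log(err || row));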

#!/usr/bin/python
# file: ipaddr.py
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = 'trunk'
import struct
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def IPAddress(address, version=None):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An Integer, 4 or 6. If set, don't try to automatically
determine what the IP address type is. important for things
like IPAddress(1), which could be IPv4, '0.0.0.0.1', or IPv6,
'::1'.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
"""
if version:
if version == 4:
return IPv4Address(address)
elif version == 6:
return IPv6Address(address)
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
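
# Examples, derived from the version-dispatch logic above:
#   IPAddress('192.0.2.1')    -> IPv4Address('192.0.2.1')
#   IPAddress(1)              -> IPv4Address('0.0.0.1')  (ints < 2**32 are IPv4)
#   IPAddress(1, version=6)   -> IPv6Address('::1')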
def IPNetwork(address, version=None, strict=False):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An Integer, if set, don't try to automatically
determine what the IP address type is. important for things
like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
'::1/128'.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if a strict network was requested and a strict
network wasn't given.
"""
if version:
if version == 4:
return IPv4Network(address, strict)
elif version == 6:
return IPv6Network(address, strict)
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> summarize_address_range(IPv4Address('1.1.1.0'),
IPv4Address('1.1.1.130'))
[IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
IPv4Network('1.1.1.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
The address range collapsed to a list of IPv4Network's or
IPv6Network's.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version is not 4 or 6.
"""
if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
        raise TypeError("%s and %s are not of the same version" % (
                        str(first), str(last)))
if first > last:
raise ValueError('last IP address must be greater than first')
networks = []
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = _count_righthand_zero_bits(first_int, ip_bits)
current = None
while nbits >= 0:
addend = 2**nbits - 1
current = first_int + addend
nbits -= 1
if current <= last_int:
break
prefix = _get_prefix_length(first_int, current, ip_bits)
net = ip('%s/%d' % (str(first), prefix))
networks.append(net)
if current == ip._ALL_ONES:
break
first_int = current + 1
first = IPAddress(first_int, version=first._version)
return networks
def _collapse_address_list_recursive(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
        ip1 = IPv4Network('1.1.0.0/24')
        ip2 = IPv4Network('1.1.1.0/24')
        ip3 = IPv4Network('1.1.2.0/24')
        ip4 = IPv4Network('1.1.3.0/24')
        ip5 = IPv4Network('1.1.4.0/24')
        ip6 = IPv4Network('1.1.0.1/22')
_collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
This shouldn't be called directly; it is called via
collapse_address_list([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
ret_array.append(cur_addr)
continue
if cur_addr in ret_array[-1]:
optimized = True
elif cur_addr == ret_array[-1].supernet().subnet()[1]:
ret_array.append(ret_array.pop().supernet())
optimized = True
else:
ret_array.append(cur_addr)
if optimized:
return _collapse_address_list_recursive(ret_array)
return ret_array
def collapse_address_list(addresses):
"""Collapse a list of IP objects.
Example:
collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
[IPv4('1.1.0.0/23')]
Args:
addresses: A list of IPv4Network or IPv6Network objects.
Returns:
A list of IPv4Network or IPv6Network objects depending on what we
were passed.
Raises:
TypeError: If passed a list of mixed version objects.
"""
i = 0
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseIP):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip.ip)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
nets = sorted(set(nets))
while i < len(ips):
(first, last) = _find_address_range(ips[i:])
i = ips.index(last) + 1
addrs.extend(summarize_address_range(first, last))
return _collapse_address_list_recursive(sorted(
addrs + nets, key=_BaseNet._get_networks_key))
# backwards compatibility
CollapseAddrList = collapse_address_list
# Test whether this Python implementation supports byte objects that
# are not identical to str ones.
# We need to exclude platforms where bytes == str so that we can
# distinguish between packed representations and strings, for example
# b'12::' (the IPv4 address 49.50.58.58) and '12::' (an IPv6 address).
try:
_compat_has_real_bytes = bytes is not str
except NameError: # <Python2.6
_compat_has_real_bytes = False
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddr sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNet):
return obj._get_networks_key()
elif isinstance(obj, _BaseIP):
return obj._get_address_key()
return NotImplemented
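
# Example: a mixed list sorts cleanly with this key (per the
# _get_networks_key/_get_address_key ordering above):
#   sorted([IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')],
#          key=get_mixed_type_key)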
class _IPAddrBase(object):
"""The mother class."""
def __index__(self):
return self._ip
def __int__(self):
return self._ip
def __hex__(self):
return hex(self._ip)
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return str(self)
class _BaseIP(_IPAddrBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
def __init__(self, address):
if '/' in str(address):
raise AddressValueError(address)
def __eq__(self, other):
try:
return (self._ip == other._ip
and self._version == other._version)
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
def __le__(self, other):
gt = self.__gt__(other)
if gt is NotImplemented:
return NotImplemented
return not gt
def __ge__(self, other):
lt = self.__lt__(other)
if lt is NotImplemented:
return NotImplemented
return not lt
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseIP):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self._ip != other._ip:
return self._ip < other._ip
return False
def __gt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseIP):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self._ip != other._ip:
return self._ip > other._ip
return False
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __str__(self):
return '%s' % self._string_from_ip_int(self._ip)
def __hash__(self):
return hash(hex(self._ip))
def _get_address_key(self):
return (self._version, self)
@property
def version(self):
raise NotImplementedError('BaseIP has no version')
class _BaseNet(_IPAddrBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def iterhosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
cur = int(self.network) + 1
bcast = int(self.broadcast) - 1
while cur <= bcast:
cur += 1
yield IPAddress(cur - 1, version=self._version)
def __iter__(self):
cur = int(self.network)
bcast = int(self.broadcast)
while cur <= bcast:
cur += 1
yield IPAddress(cur - 1, version=self._version)
def __getitem__(self, n):
network = int(self.network)
broadcast = int(self.broadcast)
if n >= 0:
if network + n > broadcast:
raise IndexError
return IPAddress(network + n, version=self._version)
else:
n += 1
if broadcast + n < network:
raise IndexError
return IPAddress(broadcast + n, version=self._version)
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseNet):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self.network != other.network:
return self.network < other.network
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __gt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseNet):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self.network != other.network:
return self.network > other.network
if self.netmask != other.netmask:
return self.netmask > other.netmask
return False
def __le__(self, other):
gt = self.__gt__(other)
if gt is NotImplemented:
return NotImplemented
return not gt
def __ge__(self, other):
lt = self.__lt__(other)
if lt is NotImplemented:
return NotImplemented
return not lt
def __eq__(self, other):
try:
return (self._version == other._version
and self.network == other.network
and int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
def __str__(self):
return '%s/%s' % (str(self.ip),
str(self._prefixlen))
def __hash__(self):
return hash(int(self.network) ^ int(self.netmask))
def __contains__(self, other):
# dealing with another network.
        if isinstance(other, _BaseNet):
            return (int(self.network) <= int(other.network) and
                    int(self.broadcast) >= int(other.broadcast))
# dealing with another address
else:
return (int(self.network) <= int(other._ip) <=
int(self.broadcast))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network in other or self.broadcast in other or (
other.network in self or other.broadcast in self)
@property
def network(self):
x = self._cache.get('network')
if x is None:
x = IPAddress(self._ip & int(self.netmask), version=self._version)
self._cache['network'] = x
return x
@property
def broadcast(self):
x = self._cache.get('broadcast')
if x is None:
x = IPAddress(self._ip | int(self.hostmask), version=self._version)
self._cache['broadcast'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
version=self._version)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (str(self.ip), self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (str(self.ip), str(self.netmask))
@property
def with_hostmask(self):
return '%s/%s' % (str(self.ip), str(self.hostmask))
@property
def numhosts(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast) - int(self.network) + 1
@property
def version(self):
raise NotImplementedError('BaseNet has no version')
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = IP('10.1.1.0/24')
addr2 = IP('10.1.1.0/26')
addr1.address_exclude(addr2) =
[IP('10.1.1.64/26'), IP('10.1.1.128/25')]
or IPv6:
addr1 = IP('::1/32')
addr2 = IP('::1/128')
addr1.address_exclude(addr2) = [IP('::0/128'),
IP('::2/127'),
IP('::4/126'),
IP('::8/125'),
...
IP('0:0:8000::/33')]
Args:
other: An IP object of the same type.
Returns:
A sorted list of IP objects addresses which is self minus
other.
Raises:
            TypeError: If self and other are of differing address
                versions.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
str(self), str(other)))
if other not in self:
raise ValueError('%s not contained in %s' % (str(other),
str(self)))
ret_addrs = []
# Make sure we're comparing the network of other.
other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
version=other._version)
s1, s2 = self.subnet()
while s1 != other and s2 != other:
if other in s1:
ret_addrs.append(s2)
s1, s2 = s1.subnet()
elif other in s2:
ret_addrs.append(s1)
s1, s2 = s2.subnet()
else:
# If we got here, there's a bug somewhere.
assert True == False, ('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(str(s1), str(s2), str(other)))
if s1 == other:
ret_addrs.append(s2)
elif s2 == other:
ret_addrs.append(s1)
else:
# If we got here, there's a bug somewhere.
assert True == False, ('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(str(s1), str(s2), str(other)))
return sorted(ret_addrs, key=_BaseNet._get_networks_key)
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
0 if self == other
eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
1 if self > other
eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
IPv6('1080::1:200C:417A/112') >
IPv6('1080::0:200C:417A/112')
If the IP versions of self and other are different, returns:
-1 if self._version < other._version
eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
1 if self._version > other._version
eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
"""
if self._version < other._version:
return -1
if self._version > other._version:
return 1
# self._version == other._version below here:
if self.network < other.network:
return -1
if self.network > other.network:
return 1
# self.network == other.network below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
# self.network == other.network and self.netmask == other.netmask
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network, self.netmask)
def _ip_int_from_prefix(self, prefixlen=None):
"""Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
if not prefixlen and prefixlen != 0:
prefixlen = self._prefixlen
return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
def _prefix_from_ip_int(self, ip_int, mask=32):
"""Return prefix length from the decimal netmask.
Args:
ip_int: An integer, the IP address.
mask: The netmask. Defaults to 32.
Returns:
An integer, the prefix length.
"""
while mask:
if ip_int & 1 == 1:
break
ip_int >>= 1
mask -= 1
return mask
def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if not prefixlen:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), return a list with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if not self._is_valid_netmask(str(new_prefixlen)):
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, str(self)))
first = IPNetwork('%s/%s' % (str(self.network),
str(self._prefixlen + prefixlen_diff)),
version=self._version)
yield first
current = first
while True:
broadcast = current.broadcast
if broadcast == self.broadcast:
return
new_addr = IPAddress(int(broadcast) + 1, version=self._version)
current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
version=self._version)
yield current
def subnet(self, prefixlen_diff=1, new_prefix=None):
"""Return a list of subnets, rather than an interator."""
return list(self.iter_subnets(prefixlen_diff, new_prefix))
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
if self.prefixlen - prefixlen_diff < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return IPNetwork('%s/%s' % (str(self.network),
str(self.prefixlen - prefixlen_diff)),
version=self._version)
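
    # Examples (derived from the return expression above; the result keeps the
    # original network address with a shorter prefix):
    #   IPv4Network('192.0.2.0/24').supernet()              -> IPv4Network('192.0.2.0/23')
    #   IPv4Network('192.0.2.0/24').supernet(new_prefix=20) -> IPv4Network('192.0.2.0/20')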
# backwards compatibility
Subnet = subnet
Supernet = supernet
AddressExclude = address_exclude
CompareNetworks = compare_networks
Contains = __contains__
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2**32) - 1
def __init__(self, address):
self._version = 4
self._max_prefixlen = 32
def _explode_shorthand_ip_string(self, ip_str=None):
if not ip_str:
ip_str = str(self)
return ip_str
def _ip_int_from_string(self, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if the string isn't a valid IP string.
"""
packed_ip = 0
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError(ip_str)
for oc in octets:
try:
packed_ip = (packed_ip << 8) | int(oc)
except ValueError:
raise AddressValueError(ip_str)
return packed_ip
def _string_from_ip_int(self, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
octets = []
for _ in xrange(4):
octets.insert(0, str(ip_int & 0xFF))
ip_int >>= 8
return '.'.join(octets)
def _is_valid_ip(self, address):
"""Validate the dotted decimal notation IP/netmask string.
Args:
address: A string, either representing a quad-dotted ip
or an integer which is a valid IPv4 IP address.
Returns:
A boolean, True if the string is a valid dotted decimal IP
string.
"""
octets = address.split('.')
if len(octets) == 1:
# We have an integer rather than a dotted decimal IP.
try:
return int(address) >= 0 and int(address) <= self._ALL_ONES
except ValueError:
return False
if len(octets) != 4:
return False
for octet in octets:
try:
if not 0 <= int(octet) <= 255:
return False
except ValueError:
return False
return True
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def packed(self):
"""The binary representation of this address."""
return struct.pack('!I', self._ip)
@property
def version(self):
return self._version
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in IPv4Network('240.0.0.0/4')
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 1918.
"""
return (self in IPv4Network('10.0.0.0/8') or
self in IPv4Network('172.16.0.0/12') or
self in IPv4Network('192.168.0.0/16'))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in IPv4Network('224.0.0.0/4')
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self in IPv4Network('0.0.0.0')
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in IPv4Network('127.0.0.0/8')
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in IPv4Network('169.254.0.0/16')
class IPv4Address(_BaseV4, _BaseIP):
"""Represent and manipulate single IPv4 Addresses."""
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
'192.168.1.1'
Additionally, an integer can be passed, so
IPv4Address('192.168.1.1') == IPv4Address(3232235777).
or, more generally
IPv4Address(int(IPv4Address('192.168.1.1'))) ==
IPv4Address('192.168.1.1')
Raises:
AddressValueError: If ipaddr isn't a valid IPv4 address.
"""
_BaseIP.__init__(self, address)
_BaseV4.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 4:
self._ip = struct.unpack('!I', address)[0]
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
if not self._is_valid_ip(addr_str):
raise AddressValueError(addr_str)
self._ip = self._ip_int_from_string(addr_str)
class IPv4Network(_BaseV4, _BaseNet):
"""This class represents and manipulates 32-bit IPv4 networks.
Attributes: [examples for IPv4Network('1.2.3.4/27')]
._ip: 16909060
.ip: IPv4Address('1.2.3.4')
.network: IPv4Address('1.2.3.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast: IPv4Address('1.2.3.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0))
def __init__(self, address, strict=False):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.168.1.1/24'
'192.168.1.1/255.255.255.0'
'192.168.1.1/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.168.1.1'
'192.168.1.1/255.255.255.255'
'192.168.1.1/32'
            are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.168.1.1') == IPv4Network(3232235777).
or, more generally
IPv4Network(int(IPv4Network('192.168.1.1'))) ==
IPv4Network('192.168.1.1')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 192.168.1.0/24 and not an
IP address on a network, eg, 192.168.1.1/24.
Raises:
AddressValueError: If ipaddr isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNet.__init__(self, address)
_BaseV4.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
self.ip = IPv4Address(self._ip)
self._prefixlen = 32
self.netmask = IPv4Address(self._ALL_ONES)
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 4:
self._ip = struct.unpack('!I', address)[0]
self.ip = IPv4Address(self._ip)
self._prefixlen = 32
self.netmask = IPv4Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError(address)
if not self._is_valid_ip(addr[0]):
raise AddressValueError(addr[0])
self._ip = self._ip_int_from_string(addr[0])
self.ip = IPv4Address(self._ip)
if len(addr) == 2:
mask = addr[1].split('.')
if len(mask) == 4:
# We have dotted decimal netmask.
if self._is_valid_netmask(addr[1]):
self.netmask = IPv4Address(self._ip_int_from_string(
addr[1]))
elif self._is_hostmask(addr[1]):
self.netmask = IPv4Address(
self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
else:
raise NetmaskValueError('%s is not a valid netmask'
% addr[1])
self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
else:
# We have a netmask in prefix length form.
if not self._is_valid_netmask(addr[1]):
raise NetmaskValueError(addr[1])
self._prefixlen = int(addr[1])
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
else:
self._prefixlen = 32
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
if strict:
if self.ip != self.network:
raise ValueError('%s has host bits set' %
self.ip)
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [int(x) for x in bits if int(x) in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _is_valid_netmask(self, netmask):
"""Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
"""
mask = netmask.split('.')
if len(mask) == 4:
if [x for x in mask if int(x) not in self._valid_mask_octets]:
return False
            if [y for idx, y in enumerate(mask) if idx > 0 and
                    int(y) > int(mask[idx - 1])]:
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= 32
# backwards compatibility
IsRFC1918 = lambda self: self.is_private
IsMulticast = lambda self: self.is_multicast
IsLoopback = lambda self: self.is_loopback
IsLinkLocal = lambda self: self.is_link_local
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
_ALL_ONES = (2**128) - 1
def __init__(self, address):
self._version = 6
self._max_prefixlen = 128
def _ip_int_from_string(self, ip_str=None):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
A long, the IPv6 ip_str.
Raises:
AddressValueError: if ip_str isn't a valid IP Address.
"""
if not ip_str:
ip_str = str(self.ip)
ip_int = 0
fields = self._explode_shorthand_ip_string(ip_str).split(':')
# Do we have an IPv4 mapped (::ffff:a.b.c.d) or compact (::a.b.c.d)
# ip_str?
if fields[-1].count('.') == 3:
ipv4_string = fields.pop()
ipv4_int = IPv4Network(ipv4_string)._ip
octets = []
for _ in xrange(2):
octets.append(hex(ipv4_int & 0xFFFF).lstrip('0x').rstrip('L'))
ipv4_int >>= 16
fields.extend(reversed(octets))
for field in fields:
try:
ip_int = (ip_int << 16) + int(field or '0', 16)
except ValueError:
raise AddressValueError(ip_str)
return ip_int
def _compress_hextets(self, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index in range(len(hextets)):
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
def _string_from_ip_int(self, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if not ip_int and ip_int != 0:
ip_int = int(self._ip)
if ip_int > self._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = []
for x in range(0, 32, 4):
hextets.append('%x' % int(hex_str[x:x+4], 16))
hextets = self._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self, ip_str=None):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not ip_str:
ip_str = str(self)
if isinstance(self, _BaseNet):
ip_str = str(self.ip)
if self._is_shorthand_ip(ip_str):
new_ip = []
hextet = ip_str.split('::')
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for _ in xrange(8 - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
# We've already got a longhand ip_str.
return ip_str
def _is_valid_ip(self, ip_str):
"""Ensure we have a valid IPv6 address.
Probably not as exhaustive as it should be.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = self._explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
IPv4Network(hextet)
except AddressValueError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _is_shorthand_ip(self, ip_str=None):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
return False
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def packed(self):
"""The binary representation of this address."""
return struct.pack('!QQ', self._ip >> 64, self._ip & (2**64 - 1))
@property
def version(self):
return self._version
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in IPv6Network('ff00::/8')
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self in IPv6Network('::/8') or
self in IPv6Network('100::/8') or
self in IPv6Network('200::/7') or
self in IPv6Network('400::/6') or
self in IPv6Network('800::/5') or
self in IPv6Network('1000::/4') or
self in IPv6Network('4000::/3') or
self in IPv6Network('6000::/3') or
self in IPv6Network('8000::/3') or
self in IPv6Network('A000::/3') or
self in IPv6Network('C000::/3') or
self in IPv6Network('E000::/4') or
self in IPv6Network('F000::/5') or
self in IPv6Network('F800::/6') or
self in IPv6Network('FE00::/9'))
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self == IPv6Network('::')
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self == IPv6Network('::1')
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in IPv6Network('fe80::/10')
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in IPv6Network('fec0::/10')
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
return self in IPv6Network('fc00::/7')
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
hextets = self._explode_shorthand_ip_string().split(':')
if hextets[-3] != 'ffff':
return None
try:
return IPv4Address(int('%s%s' % (hextets[-2], hextets[-1]), 16))
        except AddressValueError:
return None
class IPv6Address(_BaseV6, _BaseIP):
"""Represent and manipulate single IPv6 Addresses.
"""
def __init__(self, address):
"""Instantiate a new IPv6 address object.

        Args:
            address: A string or integer representing the IP

Additionally, an integer can be passed, so
IPv6Address('2001:4860::') ==
IPv6Address(42541956101370907050197289607612071936L).
or, more generally
IPv6Address(IPv6Address('2001:4860::')._ip) ==
IPv6Address('2001:4860::')
Raises:
            AddressValueError: If address isn't a valid IPv6 address.
"""
_BaseIP.__init__(self, address)
_BaseV6.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 16:
tmp = struct.unpack('!QQ', address)
self._ip = (tmp[0] << 64) | tmp[1]
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
if not addr_str:
raise AddressValueError('')
self._ip = self._ip_int_from_string(addr_str)
class IPv6Network(_BaseV6, _BaseNet):
"""This class represents and manipulates 128-bit IPv6 networks.
    Attributes: [examples for IPv6Network('2001:658:22A:CAFE:200::1/64')]
.ip: IPv6Address('2001:658:22a:cafe:200::1')
.network: IPv6Address('2001:658:22a:cafe::')
.hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
.broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
.netmask: IPv6Address('ffff:ffff:ffff:ffff::')
.prefixlen: 64
"""
def __init__(self, address, strict=False):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the IP
and prefix/netmask.
'2001:4860::/128'
'2001:4860:0000:0000:0000:0000:0000:0000/128'
'2001:4860::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:4860::') ==
IPv6Network(42541956101370907050197289607612071936L).
or, more generally
IPv6Network(IPv6Network('2001:4860::')._ip) ==
IPv6Network('2001:4860::')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 192.168.1.0/24 and not an
IP address on a network, eg, 192.168.1.1/24.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNet.__init__(self, address)
_BaseV6.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
self.ip = IPv6Address(self._ip)
self._prefixlen = 128
self.netmask = IPv6Address(self._ALL_ONES)
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 16:
tmp = struct.unpack('!QQ', address)
self._ip = (tmp[0] << 64) | tmp[1]
self.ip = IPv6Address(self._ip)
self._prefixlen = 128
self.netmask = IPv6Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError(address)
if not self._is_valid_ip(addr[0]):
raise AddressValueError(addr[0])
if len(addr) == 2:
if self._is_valid_netmask(addr[1]):
self._prefixlen = int(addr[1])
else:
raise NetmaskValueError(addr[1])
else:
self._prefixlen = 128
self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
self._ip = self._ip_int_from_string(addr[0])
self.ip = IPv6Address(self._ip)
if strict:
if self.ip != self.network:
raise ValueError('%s has host bits set' %
self.ip)
def _is_valid_netmask(self, prefixlen):
"""Verify that the netmask/prefixlen is valid.
Args:
prefixlen: A string, the netmask in prefix length format.
Returns:
A boolean, True if the prefix represents a valid IPv6
netmask.
"""
try:
prefixlen = int(prefixlen)
except ValueError:
return False
        return 0 <= prefixlen <= 128

# file: urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^([a-zA-Z0-9_\-]+)/$', views.poll, name='poll'),
    url(r'^([a-zA-Z0-9_\-]+)\.csv$', views.poll, {'export': True}, name='poll_export_csv'),
    url(r'^([a-zA-Z0-9_\-]+)/comment/$', views.comment, name='poll_comment'),
    url(r'^([a-zA-Z0-9_\-]+)/comment/(\d+)/edit/$', views.comment, name='poll_comment_edit'),
url(r'^([a-zA-Z0-9_\-]+)/comment/(\d+)/delete/$', views.delete_comment, name='poll_deleteComment'),
url(r'^([a-zA-Z0-9_\-]+)/watch/$', views.watch, name='poll_watch'),
url(r'^([a-zA-Z0-9_\-]+)/settings/$', views.settings, name='poll_settings'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/$', views.edit_choice, name='poll_editChoice'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/date/$', views.edit_date_choice, name='poll_editDateChoice'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/date/$', views.edit_dt_choice_date, name='poll_editDTChoiceDate'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/time/$', views.edit_dt_choice_time, name='poll_editDTChoiceTime'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/combinations/$', views.edit_dt_choice_combinations,
name='poll_editDTChoiceCombinations'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/universal/$', views.edit_universal_choice, name='poll_editUniversalChoice'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choicevalues/', views.edit_choicevalues, name='poll_editchoicevalues'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choicevalues_create', views.edit_choicevalues_create,
name='poll_editchoicevalues_create'),
url(r'^([a-zA-Z0-9_\-]+)/delete/$', views.delete, name='poll_delete'),
url(r'^([a-zA-Z0-9_\-]+)/vote/$', views.vote, name='poll_vote'),
url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/assign/$', views.vote_assign, name='poll_voteAssign'),
url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/edit/$', views.vote, name='poll_voteEdit'),
url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/delete/$', views.vote_delete, name='poll_voteDelete'),
url(r'^([a-zA-Z0-9_\-]+)/copy/$', views.copy, name='poll_copy'),
]<|fim▁end|>
|
url(r'^([a-zA-Z0-9_\-]+)/comment/$', views.comment, name='poll_comment'),
|
<|file_name|>flatmap.rs<|end_file_name|><|fim▁begin|>#![crate_name = "sknife"]
/// Flatten and map on a list
///
/// # Arguments
///
/// * `f` - the map function
/// * `list` - A slice of elements to flatten and map
///
/// # Example
///
/// ```
/// use sknife::collection::flatmap;
/// let mut list: Vec<i32> = (1..4).collect();
/// let slice: &mut [i32] = list.as_mut_slice();
/// flatmap(slice, |x: &mut i32| vec![*x]);
///
/// ```
///
/// # Result
/// ```
/// vec![1, 2, 3];
/// ```
pub fn flatmap<A, F>(list: &mut [A], mut f: F) -> Vec<A>
where F: FnMut(&mut A) -> Vec<A> {
let mut vec = Vec::new();
for l in list {
vec.extend(f(l))
}
vec
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn flatmap_empty_list() {
let mut list: Vec<i32> = vec![];
let slice: &mut [i32] = list.as_mut_slice();
assert_eq!(
flatmap(slice, |x: &mut i32| vec![*x]),
vec![]
);
}
#[test]
fn flatmap_list() {
let mut list: Vec<i32> = (1..4).collect();
let slice: &mut [i32] = list.as_mut_slice();<|fim▁hole|> );
}
}<|fim▁end|>
|
assert_eq!(
flatmap(slice, |x: &mut i32| vec![*x]),
vec![1, 2, 3]
|
<|file_name|>startup.py<|end_file_name|><|fim▁begin|># pylint: disable=unused-import, unused-variable, missing-docstring
def _readline():
try:
import readline
except ImportError:
print("Module readline not available.")
else:
import rlcompleter
readline.parse_and_bind("tab: complete")
import os
histfile = os.path.join(os.environ["HOME"], 'python', '.history')
try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)<|fim▁hole|>del _readline
import sys
sys.ps1 = "\001\033[01;33m\002>>>\001\033[00m\002 "
sys.ps2 = "\001\033[01;33m\002...\001\033[00m\002 "<|fim▁end|>
|
del os, histfile
_readline()
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![deny(unused_imports)]
#![deny(unused_variables)]
#![feature(box_syntax)]
#![feature(convert)]
// For FFI<|fim▁hole|>
//! The `servo` test application.
//!
//! Creates a `Browser` instance with a simple implementation of
//! the compositor's `WindowMethods` to create a working web browser.
//!
//! This browser's implementation of `WindowMethods` is built on top
//! of [glutin], the cross-platform OpenGL utility and windowing
//! library.
//!
//! For the engine itself look next door in lib.rs.
//!
//! [glutin]: https://github.com/tomaka/glutin
extern crate servo;
extern crate time;
extern crate util;
extern crate errno;
extern crate compositing;
extern crate script_traits;
extern crate euclid;
extern crate libc;
extern crate msg;
extern crate gleam;
extern crate layers;
extern crate egl;
extern crate url;
extern crate net;
extern crate env_logger;
#[link(name = "stlport")]
extern {}
use util::opts;
use net::resource_task;
use servo::Browser;
use compositing::windowing::WindowEvent;
use std::env;
mod window;
mod input;
struct BrowserWrapper {
browser: Browser,
}
fn main() {
env_logger::init().unwrap();
// Parse the command line options and store them globally
if opts::from_cmdline_args(env::args().collect::<Vec<_>>().as_slice()) {
resource_task::global_init();
let window = if opts::get().headless {
None
} else {
Some(window::Window::new())
};
// Our wrapper around `Browser` that also implements some
// callbacks required by the glutin window implementation.
let mut browser = BrowserWrapper {
browser: Browser::new(window.clone()),
};
match window {
None => (),
Some(ref window) => input::run_input_loop(&window.event_send)
}
browser.browser.handle_events(vec![WindowEvent::InitializeCompositing]);
// Feed events from the window to the browser until the browser
// says to stop.
loop {
let should_continue = match window {
None => browser.browser.handle_events(vec![WindowEvent::Idle]),
Some(ref window) => {
let events = window.wait_events();
browser.browser.handle_events(events)
}
};
if !should_continue {
break
}
}
let BrowserWrapper {
browser
} = browser;
browser.shutdown();
}
}<|fim▁end|>
|
#![allow(non_snake_case, dead_code)]
|
<|file_name|>point.ts<|end_file_name|><|fim▁begin|>/**
* Module providing the point class for the two-dimensional plane
*
* @module
*/
;
/**
* A point on the two-dimensional plane
*/
<|fim▁hole|>{
/**
* The coordinate along the abscissa (x) axis
*/
public x: number;
/**
* The coordinate along the ordinate (y) axis
*/
public y: number;
/**
* Constructs a point on the two-dimensional plane
*
* @param x The coordinate along the abscissa (x) axis
* @param y The coordinate along the ordinate (y) axis
*/
public constructor( x: number, y: number )
{
this.x = x;
this.y = y;
}
}
/**
* Module exports
*/
export {
Point as default,
};<|fim▁end|>
|
class Point
|
<|file_name|>configobj.py<|end_file_name|><|fim▁begin|># configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
from lib.six import six
from _version import __version__
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
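# Illustration (assumption, not in the original source): BOMS maps a detected
# BOM to (codec to decode with, value for the ``encoding`` attribute), e.g.
#
#     codec, final = BOMS[BOM_UTF16_LE]   # ('utf16_le', 'utf_16')
#
# i.e. decode little-endian input with the endian-specific codec, but record
# the generic 'utf_16' as the file's encoding attribute.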
def match_utf8(encoding):
return BOM_LIST.get(encoding.lower()) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
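# Usage sketch (assumption, not in the original source): every key above is
# also a keyword argument of ConfigObj.__init__, so per-instance overrides
# are normally spelled as keywords:
#
#     config = ConfigObj('app.ini', encoding='utf_8', list_values=False)
#
# passing a whole options dictionary instead is deprecated (see __init__).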
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
global compiler
if compiler is None:
import compiler
s = "a=" + s
p = compiler.parse(s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
pass
class Builder(object):
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise UnknownType(o.__class__.__name__)
return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
if not s:
return s
# this is supposed to be safe
import ast
return ast.literal_eval(s)
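# Behaviour sketch (assumption): ``unrepr`` round-trips Python literals via
# ast.literal_eval, so only literal syntax is accepted:
#
#     unrepr("['a', 1, True]")    # -> ['a', 1, True]
#     unrepr('')                  # -> '' (falsy input returned unchanged)
#     unrepr("os.system('x')")    # raises ValueError: not a literal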
class ConfigObjError(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
class ParseError(ConfigObjError):
"""
This error indicates that a line is badly written.
It is neither a valid ``key = value`` line,
nor a valid section marker line.
"""
class ReloadError(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
"""
The keyword or section specified already exists.
"""
class ConfigspecError(ConfigObjError):
"""
An error occurred whilst parsing a configspec.
"""
class InterpolationError(ConfigObjError):
"""Base class for the two interpolation errors."""
class InterpolationLoopError(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
def __init__(self, option):
InterpolationError.__init__(
self,
'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
"""
This error indicates additional sections in a section with a
``__many__`` (repeated) section.
"""
class MissingInterpolationOption(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
"""An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
if not self._cookie in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
# Now start the actual work
match = self._KEYCRE.search(value)
while match:
# The actual parsing of the match is implementation-dependent,
# so delegate to our helper function
k, v, s = self._parse_match(match)
if k is None:
# That's the signal that no further interpolation is needed
replacement = v
else:
# Further interpolation may be needed to obtain final value
replacement = recursive_interpolate(k, v, s, backtrail)
# Replace the matched string with its final value
start, end = match.span()
value = ''.join((value[:start], replacement, value[end:]))
new_search_start = start + len(replacement)
# Pick up the next interpolation key, if any, for next time
# through the while loop
match = self._KEYCRE.search(value, new_search_start)
# Now safe to come back here again; remove marker from backtrail
del backtrail[(key, section.name)]
return value
# Back in interpolate(), all we have to do is kick off the recursive
# function with appropriate starting values
value = recursive_interpolate(key, value, self.section, {})
return value
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
def _parse_match(self, match):
"""Implementation-dependent helper function.
Will be passed a match object corresponding to the interpolation
key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
key in the appropriate config file section (using the ``_fetch()``
helper function) and return a 3-tuple: (key, value, section)
``key`` is the name of the key we're looking for
``value`` is the value found for that key
``section`` is a reference to the section where it was found
``key`` and ``section`` should be None if no further
interpolation should be performed on the resulting value
(e.g., if we interpolated "$$" and returned "$").
"""
raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
"""Behaves like ConfigParser."""
_cookie = '%'
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
def _parse_match(self, match):
key = match.group(1)
value, section = self._fetch(key)
return key, value, section
class TemplateInterpolation(InterpolationEngine):
"""Behaves like string.Template."""
_cookie = '$'
_delimiter = '$'
_KEYCRE = re.compile(r"""
\$(?:
(?P<escaped>\$) | # Two $ signs
(?P<named>[_a-z][_a-z0-9]*) | # $name format
{(?P<braced>[^}]*)} # ${name} format
)
""", re.IGNORECASE | re.VERBOSE)
def _parse_match(self, match):
# Valid name (in or out of braces): fetch value from section
key = match.group('named') or match.group('braced')
if key is not None:
value, section = self._fetch(key)
return key, value, section
# Escaped delimiter (e.g., $$): return single delimiter
if match.group('escaped') is not None:
# Return None for key and section to indicate it's time to stop
return None, self._delimiter, None
# Anything else: ignore completely, just return it unchanged
return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
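# Interpolation sketch (assumption, not in the original source): the engine
# is picked from this mapping by the ``interpolation`` option, e.g.
#
#     c = ConfigObj(['home = /opt', 'bin = %(home)s/bin'])
#     c['bin']    # -> '/opt/bin'  (default 'configparser' style)
#
#     t = ConfigObj(['home = /opt', 'bin = $home/bin'],
#                   interpolation='template')
#     t['bin']    # -> '/opt/bin'  (string.Template style)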
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
class Section(dict):
"""
A dictionary-like object that represents a section in a config file.
It does string interpolation if the 'interpolation' attribute
of the 'main' object is set to True.
Interpolation is tried first from this object, then from the 'DEFAULT'
section of this object, next from the parent and its 'DEFAULT' section,
and so on until the main object is reached.
A Section will behave like an ordered dictionary - following the
order of the ``scalars`` and ``sections`` attributes.
You can use this to change the order of members.
Iteration follows the order: scalars, then sections.
"""
def __setstate__(self, state):
dict.update(self, state[0])
self.__dict__.update(state[1])
def __reduce__(self):
state = (dict(self), self.__dict__)
return (__newobj__, (self.__class__,), state)
def __init__(self, parent, depth, main, indict=None, name=None):
"""
* parent is the section above
* depth is the depth level of this section
* main is the main ConfigObj
* indict is a dictionary to initialise the section with
"""
if indict is None:
indict = {}
dict.__init__(self)
# used for nesting level *and* interpolation
self.parent = parent
# used for the interpolation attribute
self.main = main
# level of nesting depth of this Section
self.depth = depth
# purely for information
self.name = name
#
self._initialise()
# we do this explicitly so that __setitem__ is used properly
# (rather than just passing to ``dict.__init__``)
for entry, value in indict.items():
self[entry] = value
def _initialise(self):
# the sequence of scalar values in this Section
self.scalars = []
# the sequence of sections in this Section
self.sections = []
# for comments :-)
self.comments = {}
self.inline_comments = {}
# the configspec
self.configspec = None
# for defaults
self.defaults = []
self.default_values = {}
self.extra_values = []
self._created = False
def _interpolate(self, key, value):
try:
# do we already have an interpolation engine?
engine = self._interpolation_engine
except AttributeError:
# not yet: first time running _interpolate(), so pick the engine
name = self.main.interpolation
if name == True: # note that "if name:" would be incorrect here
# backwards-compatibility: interpolation=True means use default
name = DEFAULT_INTERPOLATION
name = name.lower() # so that "Template", "template", etc. all work
class_ = interpolation_engines.get(name, None)
if class_ is None:
# invalid value for self.main.interpolation
self.main.interpolation = False
return value
else:
# save reference to engine so we don't have to do this again
engine = self._interpolation_engine = class_(self)
# let the engine do the actual work
return engine.interpolate(key, value)
def __getitem__(self, key):
"""Fetch the item and do string interpolation."""
val = dict.__getitem__(self, key)
if self.main.interpolation:
if isinstance(val, six.string_types):
return self._interpolate(key, val)
if isinstance(val, list):
def _check(entry):
if isinstance(entry, six.string_types):
return self._interpolate(key, entry)
return entry
new = [_check(entry) for entry in val]
if new != val:
return new
return val
def __setitem__(self, key, value, unrepr=False):
"""
Correctly set a value.
Making dictionary values Section instances.
(We have to special case 'Section' instances - which are also dicts)
Keys must be strings.
Values need only be strings (or lists of strings) if
``main.stringify`` is set.
``unrepr`` must be set when setting a value to a dictionary, without
creating a new sub-section.
"""
if not isinstance(key, six.string_types):
raise ValueError('The key "%s" is not a string.' % key)
# add the comment
if key not in self.comments:
self.comments[key] = []
self.inline_comments[key] = ''
# remove the entry from defaults
if key in self.defaults:
self.defaults.remove(key)
#
if isinstance(value, Section):
if key not in self:
self.sections.append(key)
dict.__setitem__(self, key, value)
elif isinstance(value, dict) and not unrepr:
# First create the new depth level,
# then create the section
if key not in self:
self.sections.append(key)
new_depth = self.depth + 1
dict.__setitem__(
self,
key,
Section(
self,
new_depth,
self.main,
indict=value,
name=key))
else:
if key not in self:
self.scalars.append(key)
if not self.main.stringify:
if isinstance(value, six.string_types):
pass
elif isinstance(value, (list, tuple)):
for entry in value:
if not isinstance(entry, six.string_types):
raise TypeError('Value is not a string "%s".' % entry)
else:
raise TypeError('Value is not a string "%s".' % value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove items from the sequence when deleting."""
dict. __delitem__(self, key)
if key in self.scalars:
self.scalars.remove(key)
else:
self.sections.remove(key)
del self.comments[key]
del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
def pop(self, key, default=MISSING):
"""
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised'
"""
try:
val = self[key]
except KeyError:
if default is MISSING:
raise
val = default
else:
del self[key]
return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = None
self.defaults = []
self.extra_values = []
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return list(zip((self.scalars + self.sections), list(self.values())))
def keys(self):
"""D.keys() -> list of D's keys"""
return (self.scalars + self.sections)
def values(self):
"""D.values() -> list of D's values"""
return [self[key] for key in (self.scalars + self.sections)]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return iter(list(self.items()))
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
return iter((self.scalars + self.sections))
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
return iter(list(self.values()))
def __repr__(self):
"""x.__repr__() <==> repr(x)"""
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])
__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in list(indict.items()):
if (key in self and isinstance(self[key], dict) and
isinstance(val, dict)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
If the function raises an exception, raise the error
unless ``raise_errors=False``, in which case set the return value to
``False``.
Any unrecognised keyword arguments you pass to walk will be passed on
to the function you pass in.
Note: if ``call_on_sections`` is ``True`` then - on encountering a
subsection, *first* the function is called for the *whole* subsection,
and then recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
"""
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, six.string_types):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b') #doctest: +ELLIPSIS
3.2...
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
class ConfigObj(Section):
"""An object to read, create, and write config files."""
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
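# Parsing sketch (assumption): ``_handle_value`` (further down) feeds raw
# value strings through this regex, so that e.g.
#
#     'a, "b c", d   # note'  ->  (['a', 'b c', 'd'], '# note')
#     'single'                ->  ('single', None)
#     ','                     ->  ([], None)
#
# a trailing comma matches an extra empty item; ``_handle_value`` works
# around that (see the FIXME above).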
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
def __init__(self, infile=None, options=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False)``
"""
self._inspec = _inspec
# init the superclass
Section.__init__(self, self, 0, self)
infile = infile or []
_options = {'configspec': configspec,
'encoding': encoding, 'interpolation': interpolation,
'raise_errors': raise_errors, 'list_values': list_values,
'create_empty': create_empty, 'file_error': file_error,
'stringify': stringify, 'indent_type': indent_type,
'default_encoding': default_encoding, 'unrepr': unrepr,
'write_empty_values': write_empty_values}
if options is None:
options = _options
else:
import warnings
warnings.warn('Passing in an options dictionary to ConfigObj() is '
'deprecated. Use **options instead.',
DeprecationWarning, stacklevel=2)
# TODO: check the values too.
for entry in options:
if entry not in OPTION_DEFAULTS:
raise TypeError('Unrecognised option "%s".' % entry)
for entry, value in list(OPTION_DEFAULTS.items()):
if entry not in options:
options[entry] = value
keyword_value = _options[entry]
if value != keyword_value:
options[entry] = keyword_value
# XXXX this ignores an explicit list_values = True in combination
# with _inspec. The user should *never* do that anyway, but still...
if _inspec:
options['list_values'] = False
self._initialise(options)
configspec = options['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
def _load(self, infile, configspec):
if isinstance(infile, six.string_types):
self.filename = infile
if os.path.isfile(infile):
with open(infile, 'rb') as h:
content = h.readlines() or []
elif self.file_error:
# raise an error if the file doesn't exist
raise IOError('Config file not found: "%s".' % self.filename)
else:
# file doesn't already exist
if self.create_empty:
# this is a good test that the filename specified
# isn't impossible - like on a non-existent device
with open(infile, 'w') as h:
h.write('')
content = []
elif isinstance(infile, (list, tuple)):
content = list(infile)
elif isinstance(infile, dict):
# initialise self
# the Section class handles creating subsections
if isinstance(infile, ConfigObj):
# get a copy of our ConfigObj
def set_section(in_section, this_section):
for entry in in_section.scalars:
this_section[entry] = in_section[entry]
for section in in_section.sections:
this_section[section] = {}
set_section(in_section[section], this_section[section])
set_section(infile, self)
else:
for entry in infile:
self[entry] = infile[entry]
del self._errors
if configspec is not None:
self._handle_configspec(configspec)
else:
self.configspec = None
return
elif getattr(infile, 'read', MISSING) is not MISSING:
# This supports file like objects
content = infile.read() or []
# needs splitting into lines - but needs doing *after* decoding
# in case it's not an 8 bit encoding
else:
raise TypeError('infile must be a filename, file like object, or list of lines.')
if content:
# don't do it for the empty ConfigObj
content = self._handle_bom(content)
# infile is now *always* a list
#
# Set the newlines attribute (first line ending it finds)
# and strip trailing '\n' or '\r' from lines
for line in content:
if (not line) or (line[-1] not in ('\r', '\n')):
continue
for end in ('\r\n', '\n', '\r'):
if line.endswith(end):
self.newlines = end
break
break
assert all(isinstance(line, six.string_types) for line in content), repr(content)
content = [line.rstrip('\r\n') for line in content]
self._parse(content)
# if we had any errors, now is the time to raise them
if self._errors:
info = "at line %s." % self._errors[0].line_number
if len(self._errors) > 1:
msg = "Parsing failed with several errors.\nFirst error %s" % info
error = ConfigObjError(msg)
else:
error = self._errors[0]
# set the errors attribute; it's a list of tuples:
# (error_type, message, line_number)
error.errors = self._errors
# set the config attribute
error.config = self
raise error
# delete private attributes
del self._errors
if configspec is None:
self.configspec = None
else:
self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('ConfigObj({%s})' %
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)]))
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if isinstance(line, six.text_type):
# it's already decoded and there's no need to do anything
# else, just use the _decode utility method to handle
# listifying appropriately
return self._decode(infile, self.encoding)
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(line, six.binary_type) or not line.startswith(BOM):
# didn't specify a BOM, or it's not a bytestring
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF-8
if isinstance(infile, six.text_type):
return infile.splitlines(True)
elif isinstance(infile, six.binary_type):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
# UTF16 - have to decode
return self._decode(infile, encoding)
if six.PY2 and isinstance(line, str):
# don't actually do any decoding, since we're on python 2 and
# returning a bytestring is fine
return self._decode(infile, None)
# No BOM discovered and no encoding specified, default to UTF-8
if isinstance(infile, six.binary_type):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, six.binary_type) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
Decode infile to unicode. Using the specified encoding.
If it is a string, it also needs converting to a list.
"""
if isinstance(infile, six.string_types):
return infile.splitlines(True)
if isinstance(infile, six.binary_type):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, six.binary_type):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, six.binary_type) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, six.string_types):
# intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
# the new section is a child the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly<|fim▁hole|> if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occurred at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = '{0} at line {1}.'.format(text, cur_index)
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, six.string_types):
if self.stringify:
# intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value
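# Quoting sketch (assumption): with the default list_values=True,
#
#     self._quote('simple')            # -> simple        (no quoting needed)
#     self._quote('a, b')              # -> "a, b"        (comma forces quotes)
#     self._quote("has ' and \" #")    # -> '''has ' and " #'''  (triple-quoted)
#     self._quote(['x', 'y'])          # -> x, y          (comma-joined list)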
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
def _handle_value(self, value):
"""
Given a value string, unquote, remove comment,
handle lists. (including empty and single member lists)
"""
if self._inspec:
# Parsing a configspec so don't handle comments
return (value, '')
# do we look for lists in values ?
if not self.list_values:
mat = self._nolistvalue.match(value)
if mat is None:
raise SyntaxError()
# NOTE: we don't unquote here
return mat.groups()
#
mat = self._valueexp.match(value)
if mat is None:
# the value is badly constructed, probably badly quoted,
# or an invalid list
raise SyntaxError()
(list_values, single, empty_list, comment) = mat.groups()
if (list_values == '') and (single is None):
# change this if you want to accept empty values
raise SyntaxError()
# NOTE: note there is no error handling from here if the regex
# is wrong: then incorrect values will slip through
if empty_list is not None:
# the single comma - meaning an empty list
return ([], comment)
if single is not None:
# handle empty values
if list_values and not single:
# FIXME: the '' is a workaround because our regex now matches
# '' at the end of a list if it has a trailing comma
single = None
else:
single = single or '""'
single = self._unquote(single)
if list_values == '':
# not a list value
return (single, comment)
the_list = self._listvalueexp.findall(list_values)
the_list = [self._unquote(val) for val in the_list]
if single is not None:
the_list += [single]
return (the_list, comment)
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation."""
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index)
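# Multiline sketch (assumption): a triple-quoted value spanning lines, e.g.
#
#     key = '''first
#     second'''   # tail
#
# arrives here with value="'''first"; lines are consumed until the closing
# quote and ('first\nsecond', '# tail', <new cur_index>) is returned.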
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
# NOTE: the calls to self._quote here handles non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
"""
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename
>>> a.filename = 'test.ini'
>>> a.write()
>>> a.filename = filename
>>> a == ConfigObj('test.ini', raise_errors=True)
1
>>> import os
>>> os.remove('test.ini')
"""
if self.indent_type is None:
# this can be true if initialised from a dictionary
self.indent_type = DEFAULT_INDENT_TYPE
out = []
cs = self._a_to_u('#')
csp = self._a_to_u('# ')
if section is None:
int_val = self.interpolation
self.interpolation = False
section = self
for line in self.initial_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
indent_string = self.indent_type * section.depth
for entry in (section.scalars + section.sections):
if entry in section.defaults:
# don't write out default values
continue
for comment_line in section.comments[entry]:
comment_line = self._decode_element(comment_line.lstrip())
if comment_line and not comment_line.startswith(cs):
comment_line = csp + comment_line
out.append(indent_string + comment_line)
this_entry = section[entry]
comment = self._handle_comment(section.inline_comments[entry])
if isinstance(this_entry, Section):
# a section
out.append(self._write_marker(
indent_string,
this_entry.depth,
entry,
comment))
out.extend(self.write(section=this_entry))
else:
out.append(self._write_line(
indent_string,
entry,
this_entry,
comment))
if section is self:
for line in self.final_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
self.interpolation = int_val
if section is not self:
return out
if (self.filename is None) and (outfile is None):
# output a list of lines
# might need to encode
# NOTE: This will *screw* UTF16, each line will start with the BOM
if self.encoding:
out = [l.encode(self.encoding) for l in out]
if (self.BOM and ((self.encoding is None) or
(BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
# Add the UTF8 BOM
if not out:
out.append('')
out[0] = BOM_UTF8 + out[0]
return out
# Turn the list to a string, joined with correct newlines
newline = self.newlines or os.linesep
if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
and sys.platform == 'win32' and newline == '\r\n'):
# Windows specific hack to avoid writing '\r\r\n'
newline = '\n'
output = self._a_to_u(newline).join(out)
if not output.endswith(newline):
output += newline
if isinstance(output, six.binary_type):
output_bytes = output
else:
output_bytes = output.encode(self.encoding or
self.default_encoding or
'ascii')
if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
# Add the UTF8 BOM
output_bytes = BOM_UTF8 + output_bytes
if outfile is not None:
outfile.write(output_bytes)
else:
with open(self.filename, 'wb') as h:
h.write(output_bytes)
def validate(self, validator, preserve_errors=False, copy=False,
section=None):
"""
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: ::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is default) then instead
        of marking a fail with a ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
"""
if section is None:
if self.configspec is None:
raise ValueError('No configspec supplied.')
if preserve_errors:
# We do this once to remove a top level dependency on the validate module
# Which makes importing configobj faster
from validate import VdtMissingValue
self._vdtMissingValue = VdtMissingValue
section = self
if copy:
section.initial_comment = section.configspec.initial_comment
section.final_comment = section.configspec.final_comment
section.encoding = section.configspec.encoding
section.BOM = section.configspec.BOM
section.newlines = section.configspec.newlines
section.indent_type = section.configspec.indent_type
#
# section.default_values.clear() #??
configspec = section.configspec
self._set_configspec(section, copy)
def validate_entry(entry, spec, val, missing, ret_true, ret_false):
section.default_values.pop(entry, None)
try:
section.default_values[entry] = validator.get_default_value(configspec[entry])
except (KeyError, AttributeError, validator.baseErrorClass):
# No default, bad default or validator has no 'get_default_value'
# (e.g. SimpleVal)
pass
try:
check = validator.check(spec,
val,
missing=missing
)
except validator.baseErrorClass as e:
if not preserve_errors or isinstance(e, self._vdtMissingValue):
out[entry] = False
else:
# preserve the error
out[entry] = e
ret_false = False
ret_true = False
else:
ret_false = False
out[entry] = True
if self.stringify or missing:
# if we are doing type conversion
# or the value is a supplied default
if not self.stringify:
if isinstance(check, (list, tuple)):
# preserve lists
check = [self._str(item) for item in check]
elif missing and check is None:
# convert the None from a default to a ''
check = ''
else:
check = self._str(check)
if (check != val) or missing:
section[entry] = check
if not copy and missing and entry not in section.defaults:
section.defaults.append(entry)
return ret_true, ret_false
#
out = {}
ret_true = True
ret_false = True
unvalidated = [k for k in section.scalars if k not in configspec]
incorrect_sections = [k for k in configspec.sections if k in section.scalars]
incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
for entry in configspec.scalars:
if entry in ('__many__', '___many___'):
# reserved names
continue
if (not entry in section.scalars) or (entry in section.defaults):
# missing entries
# or entries from defaults
missing = True
val = None
if copy and entry not in section.scalars:
# copy comments
section.comments[entry] = (
configspec.comments.get(entry, []))
section.inline_comments[entry] = (
configspec.inline_comments.get(entry, ''))
#
else:
missing = False
val = section[entry]
ret_true, ret_false = validate_entry(entry, configspec[entry], val,
missing, ret_true, ret_false)
many = None
if '__many__' in configspec.scalars:
many = configspec['__many__']
elif '___many___' in configspec.scalars:
many = configspec['___many___']
if many is not None:
for entry in unvalidated:
val = section[entry]
ret_true, ret_false = validate_entry(entry, many, val, False,
ret_true, ret_false)
unvalidated = []
for entry in incorrect_scalars:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Value %r was provided as a section' % entry
out[entry] = validator.baseErrorClass(msg)
for entry in incorrect_sections:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Section %r was provided as a single value' % entry
out[entry] = validator.baseErrorClass(msg)
# Missing sections will have been created as empty ones when the
# configspec was read.
for entry in section.sections:
# FIXME: this means DEFAULT is not copied in copy mode
if section is self and entry == 'DEFAULT':
continue
if section[entry].configspec is None:
unvalidated.append(entry)
continue
if copy:
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
out[entry] = check
if check == False:
ret_true = False
elif check == True:
ret_false = False
else:
ret_true = False
section.extra_values = unvalidated
if preserve_errors and not section._created:
# If the section wasn't created (i.e. it wasn't missing)
# then we can't return False, we need to preserve errors
ret_false = False
#
if ret_false and preserve_errors and out:
# If we are preserving errors, but all
# the failures are from missing sections / values
# then we can return False. Otherwise there is a
# real failure that we need to preserve.
ret_false = not any(out.values())
if ret_true:
return True
elif ret_false:
return False
return out
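    # Illustrative usage sketch (editor's note, not part of the original
    # class): the canonical call pattern pairs ConfigObj with the companion
    # ``validate`` module; 'app.ini' and 'app.spec' below are placeholder
    # names, not real files.
    #
    #     from validate import Validator
    #     config = ConfigObj('app.ini', configspec='app.spec')
    #     result = config.validate(Validator(), preserve_errors=True)
    #     if result is not True:
    #         print('validation failed')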
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, six.string_types):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
class SimpleVal(object):
"""
A simple validator.
Can be used to check that all members expected are present.
To use it, provide a configspec with all your members in (the value given
will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
method of your ``ConfigObj``. ``validate`` will return ``True`` if all
members are present, or a dictionary with True/False meaning
present/missing. (Whole missing sections will be replaced with ``False``)
"""
def __init__(self):
self.baseErrorClass = ConfigObjError
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
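# Illustrative sketch (editor's example, not part of the original module):
# using SimpleVal to check that all expected members are present. SimpleVal
# ignores the spec values, so any placeholder text works.
def _example_simpleval_check():
    spec = ['name = placeholder', 'port = placeholder']
    config = ConfigObj({'name': 'demo'}, configspec=spec)
    # 'name' is present, 'port' is missing:
    return config.validate(SimpleVal())   # -> {'name': True, 'port': False}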
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return sorted(results)
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return sorted(results)
for (key, val) in list(res.items()):
if val == True:
continue
if isinstance(cfg.get(key), dict):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return sorted(results)
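# Illustrative sketch (editor's example, not part of the original module):
# turning the nested results from ``validate`` into readable messages.
def _example_report_failures(config, validator):
    results = config.validate(validator, preserve_errors=True)
    if results is True:
        return []
    lines = []
    for sections, key, error in flatten_errors(config, results):
        path = '.'.join(sections + [key if key is not None else '[missing section]'])
        lines.append('%s: %s' % (path, error or 'missing'))
    return lines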
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
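# Illustrative sketch (editor's example, not part of the original module):
# removing values the configspec does not know about, after a validate() run.
def _example_strip_extras(config):
    for section_path, name in get_extra_values(config):
        section = config
        for part in section_path:
            section = section[part]
        del section[name]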
"""*A programming language is a medium of expression.* - Paul Graham"""<|fim▁end|>
|
self.indent_type = ''
# preserve the final comment
|
<|file_name|>get-display-name.js<|end_file_name|><|fim▁begin|>function getDisplayName(WrappedComponent) {
return WrappedComponent.displayName || WrappedComponent.name || "Component";
}
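// Illustrative usage (editor's sketch, not from the original file): naming a
// hypothetical higher-order component so it reads well in React DevTools.
//
//   function withLogger(Wrapped) {
//     const WithLogger = props => <Wrapped {...props} />;
//     WithLogger.displayName = `withLogger(${getDisplayName(Wrapped)})`;
//     return WithLogger;
//   }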
<|fim▁hole|>export default getDisplayName;<|fim▁end|>
| |
<|file_name|>bad-value-ident-true.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn true() { } //~ ERROR expected identifier, found keyword `true`<|fim▁hole|><|fim▁end|>
|
fn main() { }
|
<|file_name|>test_data.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2016 Didotech srl (http://www.didotech.com)
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from team_system_template import cash_book, account_template, tax_template
from team_system_template import deadline_book, industrial_accounting_template, industrial_accounting
tax_data = tax_template.format(**{
    'taxable': 240000000, # Taxable amount, 6 decimals?
    'vat_code': 22, # VAT rate or exemption code
    'agro_vat_code': 0, # Agricultural compensation VAT rate
'vat11_code': 0,
'vat_total': 52800}
) * 8
account_data = account_template.format(**{
'account_proceeds': 5810502,
    'total_proceeds': 240000000 # Taxable amount, 6 decimals?
}) * 8
cash_book_values = {
'company_id': 1,
'version': 3,
'type': 0,
'partner_id': 34,
'name': 'Cliente prova con nome estremamente lungo'[:32],
'address': 'via Tre Porcellini'[:30],
'zip': 35020,
'city': 'Padova',
'province': 'PD'[:2],
'fiscalcode': 'RSSMRA85T10A562S',
'vat_number': 01032450072,
'individual': True and 'S' or 'N', # 134
    'space': 0, # Position of the space between surname and first name
    # Foreign data:
    'country': 0, # Foreign country of residence code. Where does the code come from???
    'vat_ext': '', # Only 12 characters??? It should have been 14... E.g. (Croatia): HR12345678901, Sweden: SE999999999901
    'fiscalcode_ext': '',
    # Birth data; if these fields are empty they are taken from the fiscal code.
    'sex': 'M', # M/F 173
    'birthday': 01012001, # ddmmyyyy
'city_of_birth': 'Palermo', # KGB?
'province_of_birth': 'PA',
'phone_prefix': '091',
'phone': '1234567',
'fax_prefix': '0921',
'fax': '7890123',
    # Suppliers only 246 -
    'account_code': 9999999, # Usual cost account code
    'payment_conditions_code': 4444, # Payment terms code
'abi': 3002,
'cab': 3280,
    'partner_interm': 2, # Intermediate customer / supplier code 267
    # Invoice data 268
    'causal': 1, # Transaction reason code
    # Sales invoice=001
    # Credit note = 002
    # Purchase invoice=011
    # Receipt=020
    # Sundry-to-sundry movements=027
    # ( A multi reason code linked to a VAT reason code can also be given, e.g. 101 linked to 1 )
    # Travel agency sale=reason code linked to 1 or 20 with the travel agency field = S
    # Travel agency purchase=reason code linked to 11 with the travel agency field = S
'causal_description': 'FATT. VENDITA',
'causal_ext': 'Causale aggiuntiva',
'causal_ext_1': 'Causale aggiuntiva 1',
'causal_ext_2': 'Causale aggiuntiva 2',
    'registration_date': 0, # If 0 it is taken to be equal to the document date
    'document_date': 01012016,
    'document_number': 345, # Supplier document number including sectional
    'document_number_no_sectional': 34, # Document number (doc number without sectional)
    'vat_sectional': 22,
    'account_extract': 1501, # Account statement: entry number (doc number + sectional, joined:
    # e.g. 1501 for invoice number 15 of sectional 1)
    'account_extract_year': 2016, # Account statement: entry year (invoice issue year in YYYY format)
    'ae_currency': 0, # Account statement in currency: foreign currency code
    'ae_exchange_rate': 1000000, # 13(7+6 dec)
    'ae_date': 23012016,
    'ae_total_currency': 240000, # 16(13+3dec)
    'ae_total_currency_vat': 52800, # 16(13+3dec)
    'plafond_month': 012016, # MMYYYY PLAFOND reference and deferred invoices
    # VAT data
    'tax_data': tax_data,
    # Invoice total
    'invoice_total': 240000000, # Taxable amount, 6 decimals?
    # Revenue/cost accounts
    'account_data': account_data,
    # Data for any invoice payment or sundry movements
    # Publishing VAT
    'vat_collectability': 0, # 0=Immediate 1=Deferred 2=Deferred DL. 185/08
    # 3=Immediate for credit/debit notes 4=Split payment
    # R=Prepayment C=Accrual
    # N=Does not update the account statement
'val_0': 0,
'empty': ''
}
deadline_book_values = {
'company_id': 1,
'version': 3,
'type': 1,
    # INTRASTAT data
    'val_0': 0,
    'empty': '',
    # Portfolio data
    'payment_condition': 0, # ??? Payment terms code
    'abi': 0, # ???
    'cab': 0, # ???
    'agency_description': '', # Agency description
    'total_number_of_payments': 0, # ??? Total number of installments
    'invoice_total': 0, # ??? Document total (invoice total)
    # Bill details
    'payment_count': 0, # ??? Installment number
    'payment_deadline': 0, # ??? Due date
    'document_type': 0, # Bill type
    # 1=Draft<|fim▁hole|> # 5=Descriptive only
    # 6=Cash on delivery
    'payment_total': 0, # ??? Bill amount
    'payment_total_currency': 0, # Portfolio in currency. Bill amount in currency
    'total_stamps': 0, # Stamp duty amount
    'payment_stamp_currency': 0, # Portfolio in currency. Stamp duty amount in currency
    'payment_state': '', # ??? Bill status 0=Open 1=Closed 2=Unpaid 3=Custom
    'payment_subtype': '', # Direct remittance subtype
    'agent_code': 0, # Agent code
    'paused_payment': '', # Suspended bill
    'cig': '',
    'cup': '',
    # INTRASTAT GOODS movements, additional data...
}
def get_accounting_data():
empty_accounting = {
'val_0': 0,
'empty': '',
        'causal': 0, # ??? Industrial accounting reason code
        # Sales invoice = 001
        # Purchase invoice = 002
        'account': 0, # ??? Industrial accounting account
        # 1 = systems
        # 2 = Rentals
        # 3 = home automation
        'account_proceeds': 0, # ??? Expense / revenue item (same as the general ledger revenue accounts but with one extra 0)
        # 58100501
        # 58100502
        # 58100503
        'sign': '', # ??? Sign ( D or A )
        'total_ammount': 0, # Movement amount or total cost
}
accounting_data = ''
for k in range(0, 20):
accounting_data += industrial_accounting_template.format(**empty_accounting)
return accounting_data
industrial_accounting_values = {
'company_id': 1,
'version': 3,
'type': 2,
'val_0': 0,
# 'empty': '',
    # INDUSTRIAL ACCOUNTING 8
'accounting_data': get_accounting_data()
}
if __name__ == '__main__':
record_type = 0
if record_type == 0:
record = cash_book.format(**cash_book_values)
elif record_type == 1:
record = deadline_book.format(**deadline_book_values)
elif record_type == 2:
record = industrial_accounting.format(**industrial_accounting_values)
print record
# for s in record:
# print 'X:', s
print len(record)<|fim▁end|>
|
    # 2=Bank receipt (RiBa)
    # 3=Direct remittance
    # 4=Assignments
|
<|file_name|>ChangeEvent.java<|end_file_name|><|fim▁begin|>/*
* #%L
* GwtMaterial
* %%
* Copyright (C) 2015 - 2018 GwtMaterialDesign
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package gwt.material.design.incubator.client.keyboard.events;
import com.google.gwt.event.shared.EventHandler;
import com.google.gwt.event.shared.GwtEvent;
import com.google.gwt.event.shared.HasHandlers;
//@formatter:off
/**
 * Executes the callback function on input change. Returns the current input's string.
*
* @author kevzlou7979
*/<|fim▁hole|>
private String input;
public ChangeEvent(String input) {
this.input = input;
}
public static Type<ChangeHandler> getType() {
return TYPE;
}
public static void fire(HasHandlers source, String message) {
source.fireEvent(new ChangeEvent(message));
}
@Override
public Type<ChangeHandler> getAssociatedType() {
return TYPE;
}
@Override
protected void dispatch(ChangeHandler handler) {
handler.onChange(this);
}
public String getInput() {
return input;
}
public interface ChangeHandler extends EventHandler {
void onChange(ChangeEvent event);
}
}<|fim▁end|>
|
public class ChangeEvent extends GwtEvent<ChangeEvent.ChangeHandler> {
public static final Type<ChangeHandler> TYPE = new Type<>();
|
<|file_name|>server.go<|end_file_name|><|fim▁begin|>package ga
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"runtime"
"time"
)
const (
// PkgVersion is the current version of this package. Follows major, minor and
// patch conventions
PkgVersion = "0.0.2"
// APIVersion is the current version supported by GameAnalytics
APIVersion = 2
// SDKVersion is the current version supported by GameAnalytics
SDKVersion = "rest api v2"
// InitRoute is the url part for the init request
InitRoute = "init"
// EventsRoute is the url part for events request
EventsRoute = "events"
// SandboxGameKey is the game key for the GameAnalytics sandbox-api
SandboxGameKey = "5c6bcb5402204249437fb5a7a80a4959"
// SandboxSecretKey is the secret key for the GameAnalytics sandbox-api
SandboxSecretKey = "16813a12f718bc5c620f56944e1abc3ea13ccbac"
)
// APIStatus is the GameAnalytics response of the init event. If Enabled is
// false, the server shouldn't send any events.
type APIStatus struct {
Enabled bool
ServerTimestamp int `json:"server_ts"`
Flags []string
}
// Server wraps the API endpoint and allows events to be sent
type Server struct {
// GameKey provided by GameAnalytics for the account
GameKey string `json:"-"`
// SecretKey provided by GameAnalytics for the account
SecretKey string `json:"-"`
// URL endpoint for GameAnalytics API
URL string `json:"-"`
// Platform represents the platform of the SDK
Platform string `json:"platform"`
// OSVersion represents the Operational System Version of the SDK
OSVersion string `json:"os_version"`
// SDKVersion is the version of the SDK
SDKVersion string `json:"sdk_version"`
// Offset from GameAnalytics API and this server
TimestampOffset int `json:"-"`
APIStatus
}
// NewServer returns a server with default values for the GameAnalytics
// custom SDK implementation.
func NewServer(gameKey, secretKey string) *Server {
return &Server{
URL: fmt.Sprintf("http://api.gameanalytics.com/v%d", APIVersion),
SDKVersion: SDKVersion,
OSVersion: runtime.Version(),
Platform: "go",
GameKey: gameKey,
SecretKey: secretKey,
}
}
// NewSandboxServer return a server with default values for the GameAnalytics
// sandbox API
func NewSandboxServer() *Server {
return &Server{
URL: fmt.Sprintf("http://sandbox-api.gameanalytics.com/v%d", APIVersion),
SDKVersion: SDKVersion,
OSVersion: runtime.Version(),
Platform: "go",
GameKey: SandboxGameKey,
SecretKey: SandboxSecretKey,
}
}
// Start does the initial request to GameAnalytics API
func (s *Server) Start() error {
payload, err := json.Marshal(s)
if err != nil {
return fmt.Errorf("Init marshal payload failed (%v)", err)
}
body, err := s.post(InitRoute, payload)
if err != nil {
return err
}
err = json.Unmarshal([]byte(body), &s.APIStatus)
if err != nil {
return fmt.Errorf("APIStatus unmarshal failed (%v)", err)
}
if !s.Enabled {
return fmt.Errorf("API is disabled. Server can't send any events")
}
epoch := int(time.Now().Unix())
s.TimestampOffset = s.ServerTimestamp - epoch
return nil
}
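// exampleUsage sketches the typical client flow (editor's illustration, not
// part of the original package). It assumes the caller builds an Event value
// elsewhere in this package.
func exampleUsage(e Event) error {
	server := NewSandboxServer()
	if err := server.Start(); err != nil {
		return err
	}
	return server.SendEvent(e)
}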
// SendEvent posts a single event to GameAnalytics using the server config
func (s *Server) SendEvent(e Event) error {
return s.SendEvents([]Event{e})
}
<|fim▁hole|> if err != nil {
return fmt.Errorf("Init marshal payload failed (%v)", err)
}
result, err := s.post(EventsRoute, payload)
if err != nil {
return err
}
log.Printf("[INFO] Event sent (%s), response: %s\n", payload, result)
return nil
}
// Post sends a payload using the server config
func (s *Server) post(route string, payload []byte) ([]byte, error) {
url := fmt.Sprintf("%s/%s/%s", s.URL, s.GameKey, route)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload))
if err != nil {
return nil, fmt.Errorf("Preparing request failed (%v)", err)
}
auth := computeHmac256(payload, s.SecretKey)
req.Header.Set("Authorization", auth)
req.Header.Set("Accept", "application/json")
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Content-Encoding", "application/json") //TODO add gzip compression
client := &http.Client{}
res, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("Server request failed (%v)", err)
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, fmt.Errorf("Expected status code 200, got %d. Body: %s",
res.StatusCode, body)
}
return []byte(body), nil
}
// computeHmac256 returns the raw body content from the request using the secret
// key (private key) as the hashing key and then encoding it using base64.
func computeHmac256(payload []byte, key string) string {
h := hmac.New(sha256.New, []byte(key))
h.Write([]byte(payload))
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}<|fim▁end|>
|
// SendEvents posts one or more events to GameAnalytics using the server config
func (s *Server) SendEvents(e []Event) error {
payload, err := json.Marshal(e)
|
<|file_name|>canvas.react.js<|end_file_name|><|fim▁begin|>// Copyright (c) 2015 Uber Technologies, Inc.
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is<|fim▁hole|>// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import React, {PropTypes, Component} from 'react';
import ViewportMercator from 'viewport-mercator-project';
import window from 'global/window';
export default class CanvasOverlay extends Component {
static propTypes = {
width: PropTypes.number.isRequired,
height: PropTypes.number.isRequired,
latitude: PropTypes.number.isRequired,
longitude: PropTypes.number.isRequired,
zoom: PropTypes.number.isRequired,
redraw: PropTypes.func.isRequired,
isDragging: PropTypes.bool.isRequired
};
componentDidMount() {
this._redraw();
}
componentDidUpdate() {
this._redraw();
}
_redraw() {
const pixelRatio = window.devicePixelRatio || 1;
const canvas = this.refs.overlay;
const ctx = canvas.getContext('2d');
ctx.save();
ctx.scale(pixelRatio, pixelRatio);
const mercator = ViewportMercator(this.props);
this.props.redraw({
width: this.props.width,
height: this.props.height,
ctx,
project: mercator.project,
unproject: mercator.unproject,
isDragging: this.props.isDragging
});
ctx.restore();
}
render() {
const pixelRatio = window.devicePixelRatio || 1;
return (
<canvas
ref="overlay"
width={ this.props.width * pixelRatio }
height={ this.props.height * pixelRatio }
style={ {
width: `${this.props.width}px`,
height: `${this.props.height}px`,
position: 'absolute',
pointerEvents: 'none',
left: 0,
top: 0
} }/>
);
}
}<|fim▁end|>
| |
<|file_name|>test_dummy_thread.py<|end_file_name|><|fim▁begin|>"""Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
"""Test lock objects."""
def setUp(self):
# Create a lock
self.lock = _thread.allocate_lock()
def test_initlock(self):
#Make sure locks start locked
self.failUnless(not self.lock.locked(),
"Lock object is not initialized unlocked.")
def test_release(self):
# Test self.lock.release()
self.lock.acquire()
self.lock.release()
self.failUnless(not self.lock.locked(),
"Lock object did not release properly.")
def test_improper_release(self):
#Make sure release of an unlocked thread raises _thread.error
self.failUnlessRaises(_thread.error, self.lock.release)
def test_cond_acquire_success(self):
#Make sure the conditional acquiring of the lock works.
self.failUnless(self.lock.acquire(0),
"Conditional acquiring of the lock failed.")
def test_cond_acquire_fail(self):
#Test acquiring locked lock returns False
self.lock.acquire(0)
self.failUnless(not self.lock.acquire(0),
"Conditional acquiring of a locked lock incorrectly "
"succeeded.")
def test_uncond_acquire_success(self):
#Make sure unconditional acquiring of a lock works.
self.lock.acquire()
self.failUnless(self.lock.locked(),
"Uncondional locking failed.")
def test_uncond_acquire_return_val(self):
#Make sure that an unconditional locking returns True.
self.failUnless(self.lock.acquire(1) is True,
"Unconditional locking did not return True.")
def test_uncond_acquire_blocking(self):
#Make sure that unconditional acquiring of a locked lock blocks.
def delay_unlock(to_unlock, delay):
"""Hold on to lock for a set amount of time before unlocking."""
time.sleep(delay)
to_unlock.release()
self.lock.acquire()<|fim▁hole|> print "*** Waiting for thread to release the lock "\
"(approx. %s sec.) ***" % DELAY
self.lock.acquire()
end_time = int(time.time())
if test_support.verbose:
print "done"
self.failUnless((end_time - start_time) >= DELAY,
"Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
"""Miscellaneous tests."""
def test_exit(self):
#Make sure _thread.exit() raises SystemExit
self.failUnlessRaises(SystemExit, _thread.exit)
def test_ident(self):
#Test sanity of _thread.get_ident()
self.failUnless(isinstance(_thread.get_ident(), int),
"_thread.get_ident() returned a non-integer")
self.failUnless(_thread.get_ident() != 0,
"_thread.get_ident() returned 0")
def test_LockType(self):
        #Make sure _thread.LockType is the same type as _thread.allocate_lock()
self.failUnless(isinstance(_thread.allocate_lock(), _thread.LockType),
"_thread.LockType is not an instance of what is "
"returned by _thread.allocate_lock()")
def test_interrupt_main(self):
#Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
self.failUnlessRaises(KeyboardInterrupt, _thread.start_new_thread,
call_interrupt, tuple())
def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread that
# KeyboardInterrupt is raised instantly.
self.failUnlessRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
"""Test thread creation."""
def test_arg_passing(self):
#Make sure that parameter passing works.
def arg_tester(queue, arg1=False, arg2=False):
"""Use to test _thread.start_new_thread() passes args properly."""
queue.put((arg1, arg2))
testing_queue = Queue.Queue(1)
_thread.start_new_thread(arg_tester, (testing_queue, True, True))
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using tuple failed")
_thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
'arg1':True, 'arg2':True})
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using kwargs failed")
_thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using both tuple"
" and kwargs failed")
def test_multi_creation(self):
#Make sure multiple threads can be created.
def queue_mark(queue, delay):
"""Wait for ``delay`` seconds and then put something into ``queue``"""
time.sleep(delay)
queue.put(_thread.get_ident())
thread_count = 5
testing_queue = Queue.Queue(thread_count)
if test_support.verbose:
print
print "*** Testing multiple thread creation "\
"(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
for count in xrange(thread_count):
if DELAY:
local_delay = round(random.random(), 1)
else:
local_delay = 0
_thread.start_new_thread(queue_mark,
(testing_queue, local_delay))
time.sleep(DELAY)
if test_support.verbose:
print 'done'
self.failUnless(testing_queue.qsize() == thread_count,
"Not all %s threads executed properly after %s sec." %
(thread_count, DELAY))
def test_main(imported_module=None):
global _thread, DELAY
if imported_module:
_thread = imported_module
DELAY = 2
if test_support.verbose:
print
print "*** Using %s as _thread module ***" % _thread
test_support.run_unittest(LockTests, MiscTests, ThreadTests)
if __name__ == '__main__':
test_main()<|fim▁end|>
|
start_time = int(time.time())
_thread.start_new_thread(delay_unlock,(self.lock, DELAY))
if test_support.verbose:
print
|
<|file_name|>server.js<|end_file_name|><|fim▁begin|>//setup Dependencies<|fim▁hole|>var connect = require('connect');
//Setup Express
var express = require('express');
var path = require('path');
let app = express();
var server = require('http').Server(app);
var io = require('socket.io')(server);
var keypress = require('keypress');
var port = (process.env.PORT || 8081);
var muted = false;
const debug = true;
app.set("view engine", "pug");
app.set("views", path.join(__dirname, "views"));
app.use(express.static(path.join(__dirname, "public")));
app.set('env', 'development');
server.listen(port);
var message = '';
var main_socket;
var auksalaq_mode = 'chatMode';
//Setup Socket.IO
io.on('connection', function(socket){
if(debug){
console.log('Client Connected');
}
main_socket = socket;
//start time
setInterval(sendTime, 1000);
//ceiling
socket.on('ceiling_newuser', function (data) {
if(debug){
console.log('new user added! ' + data.username);
console.log(data);
}
socket.emit('ceiling_user_confirmed', data);
socket.broadcast.emit('ceiling_user_confirmed', data);
});
//see NomadsMobileClient.js for data var
socket.on('ceiling_message', function(data){
socket.broadcast.emit('ceiling_proc_update',data); //send data to all clients for processing sketch
socket.broadcast.emit('ceiling_client_update',data); //send data back to all clients?
if(debug){
console.log(data);
}
});
//auksalaq
socket.on('auksalaq_newuser', function (data) {
if(debug){
console.log('new user added! ' + data.username);
console.log(data);
}
data.mode = auksalaq_mode;
data.muted = muted;
socket.emit('auksalaq_user_confirmed', data);
socket.broadcast.emit('auksalaq_user_confirmed', data);
});
//see NomadsMobileClient.js for data var
socket.on('auksalaq_message', function(data){
//socket.broadcast.emit('auksalaq_proc_update',data); //send data to all clients for processing sketch
socket.broadcast.emit('auksalaq_client_update',data);
socket.emit('auksalaq_client_update',data);
if(debug){
console.log(data);
}
});
//mode change from controller
socket.on('auksalaq_mode', function(data){
socket.broadcast.emit('auksalaq_mode', data);
auksalaq_mode = data;
if(debug){
console.log(data);
}
});
socket.on('mute_state', function(data){
muted = data;
socket.broadcast.emit('mute_state', data);
console.log(data);
});
//clocky
socket.on('clock_start', function(data){
socket.broadcast.emit('clock_start', data);
if(debug){
console.log(data);
}
});
socket.on('clock_stop', function(data){
socket.broadcast.emit('clock_stop', data);
if(debug){
console.log(data);
}
});
socket.on('clock_reset', function(data){
socket.broadcast.emit('clock_reset', data);
if(debug){
console.log("resettting clock");
}
});
/*
socket.on('begin_ceiling', function(){
;
});
socket.on('begin_auksalak', function(){
;
});
socket.on('stop_ceiling', function(){
;
});
socket.on('stop_auksalak', function(){
;
});
*/
socket.on('disconnect', function(){
if(debug){
console.log('Client Disconnected.');
}
});
});
///////////////////////////////////////////
// Routes //
///////////////////////////////////////////
/////// ADD ALL YOUR ROUTES HERE /////////
app.get('/', function(req,res){
//res.send('hello world');
res.render('index.pug', {
locals : {
title : 'Nomads'
,description: 'Nomads System'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
,cache: 'false'
}
});
});
// The Ceiling Floats Away Routes
app.get('/ceiling', function(req,res){
res.render('ceiling/ceiling_client.pug', {
locals : {
title : 'The Ceiling Floats Away'
,description: 'The Ceiluing Floats Away'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
}
});
});
app.get('/ceiling_display', function(req,res){
res.render('ceiling/ceiling_display.pug', {
locals : {
title : 'The Ceiling Floats Away'
,description: 'Ceiling Nomads message disply'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
}
});
});
app.get('/ceiling_control', function(req,res){
res.render('ceiling/ceiling_control.pug', {
locals : {
title : 'The Ceiling Floats Away Control'
,description: 'Ceiling Nomads System Control'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
}
});
});
// Auksalaq Routes
app.get('/auksalaq', function(req,res){
res.render('auksalaq/auksalaq_client.pug', {
locals : {
title : 'Auksalaq'
,description: 'Auksalaq Nomads System'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
}
});
});
app.get('/auksalaq_display', function(req,res){
res.render('auksalaq/auksalaq_display.pug', {
locals : {
title : 'Auksalaq'
,description: 'Auksalaq Nomads message disply'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
}
});
});
app.get('/auksalaq_control', function(req,res){
res.render('auksalaq/auksalaq_control.pug', {
locals : {
title : 'Auksalaq Control'
,description: 'Auksalaq Nomads System Control'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
}
});
});
app.get('/auksalaq_clock', function(req,res){
res.render('auksalaq/auksalaq_clock.pug', {
locals : {
title : 'Auksalaq Clock'
,description: 'Auksalaq Nomads System Clock'
,author: 'TThatcher'
,analyticssiteid: 'XXXXXXX'
}
});
});
// catch 404 and forward to error handler
app.use(function(req, res, next) {
var err = new Error('Not Found '+req);
err.status = 404;
next(err);
});
// error handler
app.use(function(err, req, res, next) {
// set locals, only providing error in development
res.locals.message = err.message;
res.locals.error = req.app.get('env') === 'development' ? err : {};
// very basic!
if(debug){
console.error(err.stack);
}
// render the error page
res.status(err.status || 500);
res.render('404');
});
function NotFound(msg){
this.name = 'NotFound';
Error.call(this, msg);
Error.captureStackTrace(this, arguments.callee);
}
if(debug){
console.log('Listening on http://127.0.0.1:' + port );
}
//for testing
sendChat = function(data, type){
if(debug)
console.log("sending data ", data);
var messageToSend = {};
messageToSend.id = 123;
messageToSend.username = "Nomads_Server";
messageToSend.type = type;
messageToSend.messageText = data;
messageToSend.location = 0;
messageToSend.latitude = 0;
messageToSend.longitude = 0;
messageToSend.x = 0;
messageToSend.y = 0;
var date = new Date();
d = date.getMonth()+1+"."+date.getDate()+"."+date.getFullYear()+ " at " + date.getHours()+":"+date.getMinutes()+":"+date.getSeconds();
messageToSend.timestamp = d;
main_socket.broadcast.emit('auksalaq_client_update', messageToSend);
}
sendTime = function(){
var d = new Date();
main_socket.broadcast.emit('clock_update', d.getTime());
}<|fim▁end|>
| |
<|file_name|>index.ios.js<|end_file_name|><|fim▁begin|>/**
* Sample React Native App
* https://github.com/facebook/react-native
* @flow
*/
import React, { Component } from 'react';
import { AppRegistry } from 'react-native';
import Navigation from './app/config/entry';
export default class RNJueJin extends Component {
render() {
return (
<Navigation />
);<|fim▁hole|>
AppRegistry.registerComponent('RNJueJin', () => RNJueJin);<|fim▁end|>
|
}
}
|
<|file_name|>NetworkVarianceTableModel.java<|end_file_name|><|fim▁begin|>/*
* This file is part of the GeMTC software for MTC model generation and
* analysis. GeMTC is distributed from http://drugis.org/gemtc.
* Copyright (C) 2009-2012 Gert van Valkenhoef.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.drugis.mtc.presentation.results;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import javax.swing.table.AbstractTableModel;
import org.drugis.mtc.Parameter;
import org.drugis.mtc.presentation.InconsistencyWrapper;
import org.drugis.mtc.presentation.MTCModelWrapper;
import org.drugis.mtc.summary.QuantileSummary;<|fim▁hole|>@SuppressWarnings("serial")
public class NetworkVarianceTableModel extends AbstractTableModel {
private static final int RANDOM_EFFECTS = 0;
private final MTCModelWrapper<?> d_mtc;
private final PropertyChangeListener d_listener;
public NetworkVarianceTableModel(final MTCModelWrapper<?> mtc) {
d_mtc = mtc;
d_listener = new PropertyChangeListener() {
@Override
public void propertyChange(final PropertyChangeEvent evt) {
fireTableDataChanged();
}
};
if (isInconsistency()) {
attachListener(((InconsistencyWrapper<?>) d_mtc).getInconsistencyVariance());
}
attachListener(mtc.getRandomEffectsStandardDeviation());
}
private void attachListener(final Parameter p) {
final QuantileSummary quantileSummary = d_mtc.getQuantileSummary(p);
if(quantileSummary != null) {
quantileSummary.addPropertyChangeListener(d_listener);
}
}
@Override
public Class<?> getColumnClass(final int columnIndex) {
if (columnIndex == 0) {
return String.class;
} else {
return QuantileSummary.class;
}
}
@Override
public String getColumnName(final int column) {
return column == 0 ? "Parameter" : "Median (95% CrI)";
}
@Override
public int getRowCount() {
return isInconsistency() ? 2 : 1;
}
private boolean isInconsistency() {
return (d_mtc instanceof InconsistencyWrapper);
}
@Override
public Object getValueAt(final int row, final int col) {
if (col == 0) {
return getRowDescription(row);
} else {
return getEstimate(row);
}
}
private QuantileSummary getEstimate(final int row) {
return row == RANDOM_EFFECTS ? getRandomEffectsSummary() : getInconsistencySummary();
}
private QuantileSummary getInconsistencySummary() {
if (isInconsistency()) {
final Parameter p = ((InconsistencyWrapper<?>) d_mtc).getInconsistencyVariance();
return d_mtc.getQuantileSummary(p);
}
return null;
}
private QuantileSummary getRandomEffectsSummary() {
final Parameter p = d_mtc.getRandomEffectsStandardDeviation();
return d_mtc.getQuantileSummary(p);
}
private String getRowDescription(final int row) {
if (row == RANDOM_EFFECTS) {
return "Random Effects Standard Deviation";
} else {
return "Inconsistency Standard Deviation";
}
}
@Override
public int getColumnCount() {
return 2;
}
}<|fim▁end|>
| |
<|file_name|>clustertriggerauthentication.go<|end_file_name|><|fim▁begin|>/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
cache "k8s.io/client-go/tools/cache"
apiskedav1alpha1 "knative.dev/eventing-autoscaler-keda/third_party/pkg/apis/keda/v1alpha1"
versioned "knative.dev/eventing-autoscaler-keda/third_party/pkg/client/clientset/versioned"
v1alpha1 "knative.dev/eventing-autoscaler-keda/third_party/pkg/client/informers/externalversions/keda/v1alpha1"
client "knative.dev/eventing-autoscaler-keda/third_party/pkg/client/injection/client"
filtered "knative.dev/eventing-autoscaler-keda/third_party/pkg/client/injection/informers/factory/filtered"
kedav1alpha1 "knative.dev/eventing-autoscaler-keda/third_party/pkg/client/listers/keda/v1alpha1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
Selector string
}
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Keda().V1alpha1().ClusterTriggerAuthentications()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
func withDynamicInformer(ctx context.Context) context.Context {<|fim▁hole|> logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
for _, selector := range labelSelectors {
inf := &wrapper{client: client.Get(ctx), selector: selector}
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
}
return ctx
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1alpha1.ClusterTriggerAuthenticationInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
logging.FromContext(ctx).Panicf(
"Unable to fetch knative.dev/eventing-autoscaler-keda/third_party/pkg/client/informers/externalversions/keda/v1alpha1.ClusterTriggerAuthenticationInformer with selector %s from context.", selector)
}
return untyped.(v1alpha1.ClusterTriggerAuthenticationInformer)
}
type wrapper struct {
client versioned.Interface
selector string
}
var _ v1alpha1.ClusterTriggerAuthenticationInformer = (*wrapper)(nil)
var _ kedav1alpha1.ClusterTriggerAuthenticationLister = (*wrapper)(nil)
func (w *wrapper) Informer() cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(nil, &apiskedav1alpha1.ClusterTriggerAuthentication{}, 0, nil)
}
func (w *wrapper) Lister() kedav1alpha1.ClusterTriggerAuthenticationLister {
return w
}
func (w *wrapper) List(selector labels.Selector) (ret []*apiskedav1alpha1.ClusterTriggerAuthentication, err error) {
reqs, err := labels.ParseToRequirements(w.selector)
if err != nil {
return nil, err
}
selector = selector.Add(reqs...)
lo, err := w.client.KedaV1alpha1().ClusterTriggerAuthentications().List(context.TODO(), v1.ListOptions{
LabelSelector: selector.String(),
// TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria.
})
if err != nil {
return nil, err
}
for idx := range lo.Items {
ret = append(ret, &lo.Items[idx])
}
return ret, nil
}
func (w *wrapper) Get(name string) (*apiskedav1alpha1.ClusterTriggerAuthentication, error) {
// TODO(mattmoor): Check that the fetched object matches the selector.
return w.client.KedaV1alpha1().ClusterTriggerAuthentications().Get(context.TODO(), name, v1.GetOptions{
// TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria.
})
}<|fim▁end|>
|
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
|
<|file_name|>api-creation.component.ts<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const ApiCreationComponent: ng.IComponentOptions = {
bindings: {<|fim▁hole|> template: require('./api-creation.html'),
controller: 'ApiCreationController',
};
export default ApiCreationComponent;<|fim▁end|>
|
tags: '<',
tenants: '<',
groups: '<',
},
|
<|file_name|>DetailedResultsActivity.java<|end_file_name|><|fim▁begin|>package edu.osu.cse5236.group9.dieta;
import android.content.Intent;
import android.os.Bundle;
import android.support.v4.app.FragmentActivity;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentTransaction;
import android.util.Log;
import android.view.View;
import android.widget.TextView;
public class DetailedResultsActivity extends FragmentActivity implements View.OnClickListener{
private static final String ACTIVITYNAME = "DetailedResultsActivity";
private Meal mMeal;
private int currentIndex;
private int mealSize;
private TextView mTextView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
Log.d(ACTIVITYNAME, "onCreate(Bundle) called");
setContentView(R.layout.activity_detailed_results);
// TODO: get meal from prior class
mMeal=getIntent().getParcelableExtra("mMeal");
mealSize=mMeal.getFoods().size();
if(savedInstanceState!=null) {
currentIndex = savedInstanceState.getInt("curIndex");
} else {
currentIndex=0;
}
ResultsFragment resultsFragment= new ResultsFragment();
if (mealSize > 0) {
resultsFragment.passFood(mMeal.getFoods().get(currentIndex));
}
FragmentManager fragmentManager=getSupportFragmentManager();
FragmentTransaction fragmentTransaction=fragmentManager.beginTransaction();
fragmentTransaction.add(R.id.detailedresults_nfacts,resultsFragment);
fragmentTransaction.commit();
View buttonLeft=findViewById(R.id.detailedresults_left);
View buttonRight=findViewById(R.id.detailedresults_right);
View buttonFinish=findViewById(R.id.detailedresults_finish);
mTextView=(TextView) findViewById(R.id.textView_foodname);
buttonLeft.setOnClickListener(this);
buttonRight.setOnClickListener(this);
buttonFinish.setOnClickListener(this);
if (mealSize>0) {
mTextView.setText(mMeal.getFoods().get(currentIndex).getName());
}
}
@Override
public void onSaveInstanceState(Bundle savedInstanceState) {
        super.onSaveInstanceState(savedInstanceState);
        savedInstanceState.putInt("curIndex", currentIndex);
}
public void onClick(View v) {
switch (v.getId()) {
case R.id.detailedresults_left:
if (currentIndex>0) {
currentIndex--;
if(getSupportFragmentManager().findFragmentById(R.id.detailedresults_nfacts) != null) {
ResultsFragment resultsFragment= new ResultsFragment();
resultsFragment.passFood(mMeal.getFoods().get(currentIndex));
mTextView.setText(mMeal.getFoods().get(currentIndex).getName());
getSupportFragmentManager().beginTransaction().replace(R.id.detailedresults_nfacts,resultsFragment).commit();
}
}
break;
case R.id.detailedresults_right:
if (currentIndex<mealSize-1) {
currentIndex++;
if(getSupportFragmentManager().findFragmentById(R.id.detailedresults_nfacts) != null) {
ResultsFragment resultsFragment= new ResultsFragment();
resultsFragment.passFood(mMeal.getFoods().get(currentIndex));
mTextView.setText(mMeal.getFoods().get(currentIndex).getName());
getSupportFragmentManager().beginTransaction().replace(R.id.detailedresults_nfacts,resultsFragment).commit();
}
}
break;
case R.id.detailedresults_finish:
startActivity(new Intent(this,NewFoodActivity.class));
break;
}
}
@Override
public void onStart() {
super.onStart();
Log.d(ACTIVITYNAME, "onStart() called");
}
@Override
public void onPause() {
super.onPause();
Log.d(ACTIVITYNAME, "onPause() called");
}
@Override
public void onResume() {
super.onResume();
Log.d(ACTIVITYNAME, "onResume() called");
}<|fim▁hole|> super.onStop();
Log.d(ACTIVITYNAME, "onStop() called");
}
@Override
public void onDestroy() {
super.onDestroy();
Log.d(ACTIVITYNAME, "onDestroy() called");
}
}<|fim▁end|>
|
@Override
public void onStop() {
|
<|file_name|>indent.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from gi.repository import GObject, Gtk, Gedit, PeasGtk
import ConfigParser
UI_XML = '''<ui>
<menubar name="MenuBar">
<menu name="ToolsMenu" action="Tools">
<placeholder name="ToolsOps_2">
<menuitem name="Change Indent" action="ChangeIndentPlugin"/>
</placeholder>
</menu>
</menubar>
</ui>'''
class ChangeIndentPlugin(GObject.Object, Gedit.WindowActivatable, PeasGtk.Configurable):
__gtype_name__ = 'ChangeIndentPlugin'
window = GObject.property(type=Gedit.Window)
# config
config = ConfigParser.ConfigParser()
config_file = 'indent.cfg'
spaces = 2
tab = False
def __init__(self):
GObject.Object.__init__(self)
self._get_config()
def _add_ui(self):
manager = self.window.get_ui_manager()
self._actions = Gtk.ActionGroup('ChangeIndentActions')
self._actions.add_actions([
(
'ChangeIndentPlugin',
Gtk.STOCK_INFO,
'Change Indent',
'<control><alt>i',
'Change indent in current document',
self.on_change_indent
),
])
manager.insert_action_group(self._actions)
self._ui_merge_id = manager.add_ui_from_string(UI_XML)
manager.ensure_update()
def do_activate(self):
self._add_ui()
def do_deactivate(self):
self._remove_ui()
def do_update_state(self):
pass
def do_create_configure_widget(self):
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
box.set_border_width(20)
label = Gtk.Label('Change Indent Configuration (Tab to spaces).')
box.pack_start(label, False, False, 0)
section = Gtk.Box(spacing=10)
label = Gtk.Label('Spaces')
section.pack_start(label, False, False, 0)
adjustment = Gtk.Adjustment(self.spaces, 2, 10, 1, 10, 0)
spinbutton = Gtk.SpinButton()
spinbutton.set_adjustment(adjustment)
spinbutton.connect("value-changed", self.on_spaces_value_changed)
section.pack_start(spinbutton, False, False, 0)
change_tab = Gtk.CheckButton("Spaces to Tab")
change_tab.connect("toggled", self.on_change_tab_toggled)
change_tab.set_active(bool(self.tab))
section.pack_start(change_tab, False, False, 0)<|fim▁hole|> def on_spaces_value_changed(self, button):
self.spaces = int(button.get_value())
self._save_config()
def on_change_tab_toggled(self, button):
self.tab = button.get_active()
self._save_config()
def on_change_indent(self, action, data=None):
self._get_config()
doc = self.window.get_active_document()
text = ''
builded_spaces = ' ' * self.spaces
if doc:
start, end = doc.get_bounds()
text = doc.get_text(start, end, False)
stripped_text = []
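# When "Spaces to Tab" is enabled, each run of self.spaces spaces becomes a
# tab; otherwise every tab becomes self.spaces spaces.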
for line in text.split('\n'):
if self.tab:
stripped_text.append(line.replace(builded_spaces, '\t'))
else:
stripped_text.append(line.replace('\t', builded_spaces))
doc.set_text('\n'.join(stripped_text))
def _get_config(self):
self.config.read(self.config_file)
if self.config.has_option('settings', 'tab'):
self.tab = self.config.getint('settings', 'tab')
if self.config.has_option('settings', 'spaces'):
self.spaces = self.config.getint('settings', 'spaces')
def _save_config(self):
f = open(self.config_file, 'w')
if not self.config.has_section('settings'):
self.config.add_section('settings')
self.config.set('settings', 'tab', 1 if self.tab else 0)
self.config.set('settings', 'spaces', self.spaces)
self.config.write(f)
f.close()
def _remove_ui(self):
manager = self.window.get_ui_manager()
manager.remove_ui(self._ui_merge_id)
manager.remove_action_group(self._actions)
manager.ensure_update()<|fim▁end|>
|
box.pack_start(section, False, False, 0)
return box
|
<|file_name|>IRLS_tf_v2.py<|end_file_name|><|fim▁begin|># python 3
# tensorflow 2.0
from __future__ import print_function, division, absolute_import
import os
import argparse
import random
import numpy as np
import datetime
# from numpy import linalg
import os.path as osp
import sys
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(1, osp.join(cur_dir, '.'))
from sklearn.datasets import load_svmlight_file
from scipy.sparse import csr_matrix
# from scipy.sparse import linalg
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tensorflow as tf
from tf_utils import pinv_naive, pinv
path_train = osp.join(cur_dir, "../a9a/a9a")
path_test = osp.join(cur_dir, "../a9a/a9a.t")
MAX_ITER = 100
np_dtype = np.float32
tf_dtype = tf.float32
# manual seed
manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
np.random.seed(manualSeed)
# load all data
X_train, y_train = load_svmlight_file(path_train, n_features=123, dtype=np_dtype)
X_test, y_test = load_svmlight_file(path_test, n_features=123, dtype=np_dtype)
# X: scipy.sparse.csr.csr_matrix
# X_train: (32561, 123), y_train: (32561,)
# X_test: (16281, 123), y_test:(16281,)
# stack a dimension of ones to X to simplify computation
N_train = X_train.shape[0]
N_test = X_test.shape[0]
X_train = np.hstack((np.ones((N_train, 1)), X_train.toarray())).astype(np_dtype)
X_test = np.hstack((np.ones((N_test, 1)), X_test.toarray())).astype(np_dtype)
# print(X_train.shape, X_test.shape)
y_train = y_train.reshape((N_train, 1))
y_test = y_test.reshape((N_test, 1))
# label: -1, +1 ==> 0, 1
y_train = np.where(y_train == -1, 0, 1)
y_test = np.where(y_test == -1, 0, 1)
# NB: here X's shape is (N,d), which differs to the derivation
def neg_log_likelihood(w, X, y, L2_param=None):
r"""
w: dx1
X: Nxd
y: Nx1
L2_param: \lambda>0, will introduce -\lambda/2 ||w||_2^2
"""
# print(type(X), X.dtype)
res = tf.matmul(tf.matmul(tf.transpose(w), tf.transpose(X)), y.astype(np_dtype)) - \
tf.reduce_sum(tf.math.log(1 + tf.exp(tf.matmul(X, w))))
if L2_param is not None and L2_param > 0:
res += -0.5 * L2_param * tf.matmul(tf.transpose(w), w)
return -res[0][0]
def prob(X, w):
"""
X: Nxd
w: dx1
---
prob: N x num_classes(2)"""
y = tf.constant(np.array([0.0, 1.0]), dtype=tf.float32)
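# Multiplying Xw by y = [0, 1] gives per-row logits [0, Xw], so the ratio
# below yields the two columns [P(y=0), P(y=1)] = [1, exp(Xw)] / (1 + exp(Xw)).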
prob = tf.exp(tf.matmul(X, w) * y) / (1 + tf.exp(tf.matmul(X, w)))
return prob
def compute_acc(X, y, w):
p = prob(X, w)
y_pred = tf.cast(tf.argmax(p, axis=1), tf.float32)
y = tf.cast(tf.squeeze(y), tf.float32)
acc = tf.reduce_mean(tf.cast(tf.equal(y, y_pred), tf.float32))
return acc
def update(w_old, X, y, L2_param=0):
"""
w_new = w_old - w_update
w_update = (X'RX+lambda*I)^(-1) (X'(mu-y) + lambda*w_old)
lambda is L2_param
w_old: dx1
X: Nxd
y: Nx1
---
w_update: dx1
"""
d = X.shape[1]
mu = tf.sigmoid(tf.matmul(X, w_old)) # Nx1
R_flat = mu * (1 - mu) # element-wise, Nx1
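# R_flat holds the diagonal of the IRLS weight matrix R = diag(mu * (1 - mu));
# broadcasting R_flat * X applies R without materializing an NxN matrix.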
L2_reg_term = L2_param * tf.eye(d)
XRX = tf.matmul(tf.transpose(X), R_flat * X) + L2_reg_term # dxd
# np.save('XRX_tf.npy', XRX.numpy())
# calculate pseudo inverse via SVD
# method 1
# slightly better than tfp.math.pinv when L2_param=0
XRX_pinv = pinv_naive(XRX)<|fim▁hole|> # method 2
# XRX_pinv = pinv(XRX)
# w = w - (X^T R X)^(-1) X^T (mu-y)
# w_new = tf.assign(w_old, w_old - tf.matmul(tf.matmul(XRX_pinv, tf.transpose(X)), mu - y))
y = tf.cast(y, tf_dtype)
w_update = tf.matmul(XRX_pinv, tf.matmul(tf.transpose(X), mu - y) + L2_param * w_old)
return w_update
def optimize(w_old, w_update):
"""custom update op, instead of using SGD variants"""
return w_old.assign(w_old - w_update)
def train_IRLS(X_train, y_train, X_test=None, y_test=None, L2_param=0, max_iter=MAX_ITER):
"""train Logistic Regression via IRLS algorithm
X: Nxd
y: Nx1
---
"""
N, d = X_train.shape
w = tf.Variable(0.01 * tf.ones((d, 1), dtype=tf.float32), name="w")
current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
summary_writer = tf.summary.create_file_writer(f"./logs/{current_time}")
print("start training...")
print("L2 param(lambda): {}".format(L2_param))
i = 0
# iteration
while i <= max_iter:
print("iter: {}".format(i))
# print('\t neg log likelihood: {}'.format(sess.run(neg_L, feed_dict=train_feed_dict)))
neg_L = neg_log_likelihood(w, X_train, y_train, L2_param)
print("\t neg log likelihood: {}".format(neg_L))
train_acc = compute_acc(X_train, y_train, w)
with summary_writer.as_default():
tf.summary.scalar("train_acc", train_acc, step=i)
tf.summary.scalar("train_neg_L", neg_L, step=i)
test_acc = compute_acc(X_test, y_test, w)
with summary_writer.as_default():
tf.summary.scalar("test_acc", test_acc, step=i)
print("\t train acc: {}, test acc: {}".format(train_acc, test_acc))
L2_norm_w = np.linalg.norm(w.numpy())
print("\t L2 norm of w: {}".format(L2_norm_w))
if i > 0:
diff_w = np.linalg.norm(w_update.numpy())
print("\t diff of w_old and w: {}".format(diff_w))
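# Crude convergence test: stop once the Newton step's L2 norm drops below 1e-2.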
if diff_w < 1e-2:
break
w_update = update(w, X_train, y_train, L2_param)
w = optimize(w, w_update)
i += 1
print("training done.")
if __name__ == "__main__":
# test_acc should be about 0.85
lambda_ = 20 # 0
train_IRLS(X_train, y_train, X_test, y_test, L2_param=lambda_, max_iter=100)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train.reshape(N_train,))
y_pred_train = classifier.predict(X_train)
train_acc = np.sum(y_train.reshape(N_train,) == y_pred_train)/N_train
print('train_acc: {}'.format(train_acc))
y_pred_test = classifier.predict(X_test)
test_acc = np.sum(y_test.reshape(N_test,) == y_pred_test)/N_test
print('test acc: {}'.format(test_acc))<|fim▁end|>
| |
<|file_name|>sessionmessages.cc<|end_file_name|><|fim▁begin|>/*
* libjingle
* Copyright 2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string>
#include "talk/p2p/base/sessionmessages.h"
#include "talk/base/logging.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/stringutils.h"
#include "talk/p2p/base/constants.h"
#include "talk/p2p/base/p2ptransport.h"
#include "talk/p2p/base/parsing.h"
#include "talk/p2p/base/sessionclient.h"
#include "talk/p2p/base/sessiondescription.h"
#include "talk/p2p/base/transport.h"
#include "talk/xmllite/xmlconstants.h"
#include "talk/xmpp/constants.h"
namespace cricket {
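// Maps both Gingle and Jingle wire action strings onto the shared ActionType enum.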
ActionType ToActionType(const std::string& type) {
if (type == GINGLE_ACTION_INITIATE)
return ACTION_SESSION_INITIATE;
if (type == GINGLE_ACTION_INFO)
return ACTION_SESSION_INFO;
if (type == GINGLE_ACTION_ACCEPT)
return ACTION_SESSION_ACCEPT;
if (type == GINGLE_ACTION_REJECT)
return ACTION_SESSION_REJECT;
if (type == GINGLE_ACTION_TERMINATE)
return ACTION_SESSION_TERMINATE;
if (type == GINGLE_ACTION_CANDIDATES)
return ACTION_TRANSPORT_INFO;
if (type == JINGLE_ACTION_SESSION_INITIATE)
return ACTION_SESSION_INITIATE;
if (type == JINGLE_ACTION_TRANSPORT_INFO)
return ACTION_TRANSPORT_INFO;
if (type == JINGLE_ACTION_TRANSPORT_ACCEPT)
return ACTION_TRANSPORT_ACCEPT;
if (type == JINGLE_ACTION_SESSION_INFO)
return ACTION_SESSION_INFO;
if (type == JINGLE_ACTION_SESSION_ACCEPT)
return ACTION_SESSION_ACCEPT;
if (type == JINGLE_ACTION_SESSION_TERMINATE)
return ACTION_SESSION_TERMINATE;
if (type == JINGLE_ACTION_DESCRIPTION_INFO)
return ACTION_DESCRIPTION_INFO;
if (type == GINGLE_ACTION_UPDATE)
return ACTION_DESCRIPTION_INFO;
return ACTION_UNKNOWN;
}
std::string ToJingleString(ActionType type) {
switch (type) {
case ACTION_SESSION_INITIATE:
return JINGLE_ACTION_SESSION_INITIATE;
case ACTION_SESSION_INFO:
return JINGLE_ACTION_SESSION_INFO;
case ACTION_SESSION_ACCEPT:
return JINGLE_ACTION_SESSION_ACCEPT;
// Notice that reject and terminate both go to
// "session-terminate", but there is no "session-reject".
case ACTION_SESSION_REJECT:
case ACTION_SESSION_TERMINATE:
return JINGLE_ACTION_SESSION_TERMINATE;
case ACTION_TRANSPORT_INFO:
return JINGLE_ACTION_TRANSPORT_INFO;
case ACTION_TRANSPORT_ACCEPT:
return JINGLE_ACTION_TRANSPORT_ACCEPT;
default:
return "";
}
}
std::string ToGingleString(ActionType type) {
switch (type) {
case ACTION_SESSION_INITIATE:
return GINGLE_ACTION_INITIATE;
case ACTION_SESSION_INFO:
return GINGLE_ACTION_INFO;
case ACTION_SESSION_ACCEPT:
return GINGLE_ACTION_ACCEPT;
case ACTION_SESSION_REJECT:
return GINGLE_ACTION_REJECT;
case ACTION_SESSION_TERMINATE:
return GINGLE_ACTION_TERMINATE;
case ACTION_TRANSPORT_INFO:
return GINGLE_ACTION_CANDIDATES;
default:
return "";
}
}
bool IsJingleMessage(const buzz::XmlElement* stanza) {
const buzz::XmlElement* jingle = stanza->FirstNamed(QN_JINGLE);
if (jingle == NULL)
return false;
return (jingle->HasAttr(buzz::QN_ACTION) && jingle->HasAttr(QN_SID));
}
bool IsGingleMessage(const buzz::XmlElement* stanza) {
const buzz::XmlElement* session = stanza->FirstNamed(QN_GINGLE_SESSION);
if (session == NULL)
return false;
return (session->HasAttr(buzz::QN_TYPE) &&
session->HasAttr(buzz::QN_ID) &&
session->HasAttr(QN_INITIATOR));
}
bool IsSessionMessage(const buzz::XmlElement* stanza) {
return (stanza->Name() == buzz::QN_IQ &&
stanza->Attr(buzz::QN_TYPE) == buzz::STR_SET &&
(IsJingleMessage(stanza) ||
IsGingleMessage(stanza)));
}
bool ParseGingleSessionMessage(const buzz::XmlElement* session,
SessionMessage* msg,
ParseError* error) {
msg->protocol = PROTOCOL_GINGLE;
std::string type_string = session->Attr(buzz::QN_TYPE);
msg->type = ToActionType(type_string);
msg->sid = session->Attr(buzz::QN_ID);
msg->initiator = session->Attr(QN_INITIATOR);
msg->action_elem = session;
if (msg->type == ACTION_UNKNOWN)
return BadParse("unknown action: " + type_string, error);
return true;
}
bool ParseJingleSessionMessage(const buzz::XmlElement* jingle,
SessionMessage* msg,
ParseError* error) {
msg->protocol = PROTOCOL_JINGLE;
std::string type_string = jingle->Attr(buzz::QN_ACTION);
msg->type = ToActionType(type_string);
msg->sid = jingle->Attr(QN_SID);
msg->initiator = GetXmlAttr(jingle, QN_INITIATOR, buzz::STR_EMPTY);
msg->action_elem = jingle;
if (msg->type == ACTION_UNKNOWN)
return BadParse("unknown action: " + type_string, error);
return true;
}
bool ParseHybridSessionMessage(const buzz::XmlElement* jingle,
SessionMessage* msg,
ParseError* error) {
if (!ParseJingleSessionMessage(jingle, msg, error))
return false;
msg->protocol = PROTOCOL_HYBRID;
return true;
}
bool ParseSessionMessage(const buzz::XmlElement* stanza,
SessionMessage* msg,
ParseError* error) {
msg->id = stanza->Attr(buzz::QN_ID);
msg->from = stanza->Attr(buzz::QN_FROM);
msg->to = stanza->Attr(buzz::QN_TO);
msg->stanza = stanza;
const buzz::XmlElement* jingle = stanza->FirstNamed(QN_JINGLE);
const buzz::XmlElement* session = stanza->FirstNamed(QN_GINGLE_SESSION);
if (jingle && session)
return ParseHybridSessionMessage(jingle, msg, error);
if (jingle != NULL)
return ParseJingleSessionMessage(jingle, msg, error);
if (session != NULL)
return ParseGingleSessionMessage(session, msg, error);
return false;
}
buzz::XmlElement* WriteGingleAction(const SessionMessage& msg,
const XmlElements& action_elems) {
buzz::XmlElement* session = new buzz::XmlElement(QN_GINGLE_SESSION, true);
session->AddAttr(buzz::QN_TYPE, ToGingleString(msg.type));
session->AddAttr(buzz::QN_ID, msg.sid);
session->AddAttr(QN_INITIATOR, msg.initiator);<|fim▁hole|>}
buzz::XmlElement* WriteJingleAction(const SessionMessage& msg,
const XmlElements& action_elems) {
buzz::XmlElement* jingle = new buzz::XmlElement(QN_JINGLE, true);
jingle->AddAttr(buzz::QN_ACTION, ToJingleString(msg.type));
jingle->AddAttr(QN_SID, msg.sid);
if (msg.type == ACTION_SESSION_INITIATE) {
jingle->AddAttr(QN_INITIATOR, msg.initiator);
}
AddXmlChildren(jingle, action_elems);
return jingle;
}
void WriteSessionMessage(const SessionMessage& msg,
const XmlElements& action_elems,
buzz::XmlElement* stanza) {
stanza->SetAttr(buzz::QN_TO, msg.to);
stanza->SetAttr(buzz::QN_TYPE, buzz::STR_SET);
if (msg.protocol == PROTOCOL_GINGLE) {
stanza->AddElement(WriteGingleAction(msg, action_elems));
} else {
stanza->AddElement(WriteJingleAction(msg, action_elems));
}
}
TransportParser* GetTransportParser(const TransportParserMap& trans_parsers,
const std::string& name) {
TransportParserMap::const_iterator map = trans_parsers.find(name);
if (map == trans_parsers.end()) {
return NULL;
} else {
return map->second;
}
}
bool ParseCandidates(SignalingProtocol protocol,
const buzz::XmlElement* candidates_elem,
const TransportParserMap& trans_parsers,
const std::string& transport_type,
Candidates* candidates,
ParseError* error) {
TransportParser* trans_parser =
GetTransportParser(trans_parsers, transport_type);
if (trans_parser == NULL)
return BadParse("unknown transport type: " + transport_type, error);
return trans_parser->ParseCandidates(protocol, candidates_elem,
candidates, error);
}
bool ParseGingleTransportInfos(const buzz::XmlElement* action_elem,
const ContentInfos& contents,
const TransportParserMap& trans_parsers,
TransportInfos* tinfos,
ParseError* error) {
TransportInfo tinfo(CN_OTHER, NS_GINGLE_P2P, Candidates());
if (!ParseCandidates(PROTOCOL_GINGLE, action_elem,
trans_parsers, NS_GINGLE_P2P,
&tinfo.candidates, error))
return false;
bool has_audio = FindContentInfoByName(contents, CN_AUDIO) != NULL;
bool has_video = FindContentInfoByName(contents, CN_VIDEO) != NULL;
// If we don't have media, no need to separate the candidates.
if (!has_audio && !has_video) {
tinfos->push_back(tinfo);
return true;
}
// If we have media, separate the candidates. Create the
// TransportInfo here to avoid copying the candidates.
TransportInfo audio_tinfo(CN_AUDIO, NS_GINGLE_P2P, Candidates());
TransportInfo video_tinfo(CN_VIDEO, NS_GINGLE_P2P, Candidates());
for (Candidates::iterator cand = tinfo.candidates.begin();
cand != tinfo.candidates.end(); cand++) {
if (cand->name() == GINGLE_CANDIDATE_NAME_RTP ||
cand->name() == GINGLE_CANDIDATE_NAME_RTCP) {
audio_tinfo.candidates.push_back(*cand);
} else if (cand->name() == GINGLE_CANDIDATE_NAME_VIDEO_RTP ||
cand->name() == GINGLE_CANDIDATE_NAME_VIDEO_RTCP) {
video_tinfo.candidates.push_back(*cand);
}
}
if (has_audio) {
tinfos->push_back(audio_tinfo);
}
if (has_video) {
tinfos->push_back(video_tinfo);
}
return true;
}
bool ParseJingleTransportInfo(const buzz::XmlElement* trans_elem,
const ContentInfo& content,
const TransportParserMap& trans_parsers,
TransportInfos* tinfos,
ParseError* error) {
std::string transport_type = trans_elem->Name().Namespace();
TransportInfo tinfo(content.name, transport_type, Candidates());
if (!ParseCandidates(PROTOCOL_JINGLE, trans_elem,
trans_parsers, transport_type,
&tinfo.candidates, error))
return false;
tinfos->push_back(tinfo);
return true;
}
bool ParseJingleTransportInfos(const buzz::XmlElement* jingle,
const ContentInfos& contents,
const TransportParserMap trans_parsers,
TransportInfos* tinfos,
ParseError* error) {
for (const buzz::XmlElement* pair_elem
= jingle->FirstNamed(QN_JINGLE_CONTENT);
pair_elem != NULL;
pair_elem = pair_elem->NextNamed(QN_JINGLE_CONTENT)) {
std::string content_name;
if (!RequireXmlAttr(pair_elem, QN_JINGLE_CONTENT_NAME,
&content_name, error))
return false;
const ContentInfo* content = FindContentInfoByName(contents, content_name);
if (!content)
return BadParse("Unknown content name: " + content_name, error);
const buzz::XmlElement* trans_elem;
if (!RequireXmlChild(pair_elem, LN_TRANSPORT, &trans_elem, error))
return false;
if (!ParseJingleTransportInfo(trans_elem, *content, trans_parsers,
tinfos, error))
return false;
}
return true;
}
buzz::XmlElement* NewTransportElement(const std::string& name) {
return new buzz::XmlElement(buzz::QName(name, LN_TRANSPORT), true);
}
bool WriteCandidates(SignalingProtocol protocol,
const std::string& trans_type,
const Candidates& candidates,
const TransportParserMap& trans_parsers,
XmlElements* elems,
WriteError* error) {
TransportParser* trans_parser = GetTransportParser(trans_parsers, trans_type);
if (trans_parser == NULL)
return BadWrite("unknown transport type: " + trans_type, error);
return trans_parser->WriteCandidates(protocol, candidates, elems, error);
}
bool WriteGingleTransportInfos(const TransportInfos& tinfos,
const TransportParserMap& trans_parsers,
XmlElements* elems,
WriteError* error) {
for (TransportInfos::const_iterator tinfo = tinfos.begin();
tinfo != tinfos.end(); ++tinfo) {
if (!WriteCandidates(PROTOCOL_GINGLE,
tinfo->transport_type, tinfo->candidates,
trans_parsers, elems, error))
return false;
}
return true;
}
bool WriteJingleTransportInfo(const TransportInfo& tinfo,
const TransportParserMap& trans_parsers,
XmlElements* elems,
WriteError* error) {
XmlElements candidate_elems;
if (!WriteCandidates(PROTOCOL_JINGLE,
tinfo.transport_type, tinfo.candidates, trans_parsers,
&candidate_elems, error))
return false;
buzz::XmlElement* trans_elem = NewTransportElement(tinfo.transport_type);
AddXmlChildren(trans_elem, candidate_elems);
elems->push_back(trans_elem);
return true;
}
void WriteJingleContentPair(const std::string name,
const XmlElements& pair_elems,
XmlElements* elems) {
buzz::XmlElement* pair_elem = new buzz::XmlElement(QN_JINGLE_CONTENT);
pair_elem->SetAttr(QN_JINGLE_CONTENT_NAME, name);
pair_elem->SetAttr(QN_CREATOR, LN_INITIATOR);
AddXmlChildren(pair_elem, pair_elems);
elems->push_back(pair_elem);
}
bool WriteJingleTransportInfos(const TransportInfos& tinfos,
const TransportParserMap& trans_parsers,
XmlElements* elems,
WriteError* error) {
for (TransportInfos::const_iterator tinfo = tinfos.begin();
tinfo != tinfos.end(); ++tinfo) {
XmlElements pair_elems;
if (!WriteJingleTransportInfo(*tinfo, trans_parsers,
&pair_elems, error))
return false;
WriteJingleContentPair(tinfo->content_name, pair_elems, elems);
}
return true;
}
ContentParser* GetContentParser(const ContentParserMap& content_parsers,
const std::string& type) {
ContentParserMap::const_iterator map = content_parsers.find(type);
if (map == content_parsers.end()) {
return NULL;
} else {
return map->second;
}
}
bool ParseContentInfo(SignalingProtocol protocol,
const std::string& name,
const std::string& type,
const buzz::XmlElement* elem,
const ContentParserMap& parsers,
ContentInfos* contents,
ParseError* error) {
ContentParser* parser = GetContentParser(parsers, type);
if (parser == NULL)
return BadParse("unknown application content: " + type, error);
const ContentDescription* desc;
if (!parser->ParseContent(protocol, elem, &desc, error))
return false;
contents->push_back(ContentInfo(name, type, desc));
return true;
}
bool ParseContentType(const buzz::XmlElement* parent_elem,
std::string* content_type,
const buzz::XmlElement** content_elem,
ParseError* error) {
if (!RequireXmlChild(parent_elem, LN_DESCRIPTION, content_elem, error))
return false;
*content_type = (*content_elem)->Name().Namespace();
return true;
}
bool ParseGingleContentInfos(const buzz::XmlElement* session,
const ContentParserMap& content_parsers,
ContentInfos* contents,
ParseError* error) {
std::string content_type;
const buzz::XmlElement* content_elem;
if (!ParseContentType(session, &content_type, &content_elem, error))
return false;
if (content_type == NS_GINGLE_VIDEO) {
// A parser parsing audio or video content should look at the
// namespace and only parse the codecs relevant to that namespace.
// We use this to control which codecs get parsed: first audio,
// then video.
talk_base::scoped_ptr<buzz::XmlElement> audio_elem(
new buzz::XmlElement(QN_GINGLE_AUDIO_CONTENT));
CopyXmlChildren(content_elem, audio_elem.get());
if (!ParseContentInfo(PROTOCOL_GINGLE, CN_AUDIO, NS_JINGLE_RTP,
audio_elem.get(), content_parsers,
contents, error))
return false;
if (!ParseContentInfo(PROTOCOL_GINGLE, CN_VIDEO, NS_JINGLE_RTP,
content_elem, content_parsers,
contents, error))
return false;
} else if (content_type == NS_GINGLE_AUDIO) {
if (!ParseContentInfo(PROTOCOL_GINGLE, CN_AUDIO, NS_JINGLE_RTP,
content_elem, content_parsers,
contents, error))
return false;
} else {
if (!ParseContentInfo(PROTOCOL_GINGLE, CN_OTHER, content_type,
content_elem, content_parsers,
contents, error))
return false;
}
return true;
}
bool ParseJingleContentInfos(const buzz::XmlElement* jingle,
const ContentParserMap& content_parsers,
ContentInfos* contents,
ParseError* error) {
for (const buzz::XmlElement* pair_elem
= jingle->FirstNamed(QN_JINGLE_CONTENT);
pair_elem != NULL;
pair_elem = pair_elem->NextNamed(QN_JINGLE_CONTENT)) {
std::string content_name;
if (!RequireXmlAttr(pair_elem, QN_JINGLE_CONTENT_NAME,
&content_name, error))
return false;
std::string content_type;
const buzz::XmlElement* content_elem;
if (!ParseContentType(pair_elem, &content_type, &content_elem, error))
return false;
if (!ParseContentInfo(PROTOCOL_JINGLE, content_name, content_type,
content_elem, content_parsers,
contents, error))
return false;
}
return true;
}
bool ParseJingleGroupInfos(const buzz::XmlElement* jingle,
ContentGroups* groups,
ParseError* error) {
for (const buzz::XmlElement* pair_elem
= jingle->FirstNamed(QN_JINGLE_DRAFT_GROUP);
pair_elem != NULL;
pair_elem = pair_elem->NextNamed(QN_JINGLE_DRAFT_GROUP)) {
std::string group_name;
if (!RequireXmlAttr(pair_elem, QN_JINGLE_DRAFT_GROUP_TYPE,
&group_name, error))
return false;
ContentGroup group(group_name);
for (const buzz::XmlElement* child_elem
= pair_elem->FirstNamed(QN_JINGLE_CONTENT);
child_elem != NULL;
child_elem = child_elem->NextNamed(QN_JINGLE_CONTENT)) {
std::string content_name;
if (!RequireXmlAttr(child_elem, QN_JINGLE_CONTENT_NAME,
&content_name, error))
return false;
group.AddContentName(content_name);
}
groups->push_back(group);
}
return true;
}
buzz::XmlElement* WriteContentInfo(SignalingProtocol protocol,
const ContentInfo& content,
const ContentParserMap& parsers,
WriteError* error) {
ContentParser* parser = GetContentParser(parsers, content.type);
if (parser == NULL) {
BadWrite("unknown content type: " + content.type, error);
return NULL;
}
buzz::XmlElement* elem = NULL;
if (!parser->WriteContent(protocol, content.description, &elem, error))
return NULL;
return elem;
}
bool WriteGingleContentInfos(const ContentInfos& contents,
const ContentParserMap& parsers,
XmlElements* elems,
WriteError* error) {
if (contents.size() == 1) {
buzz::XmlElement* elem = WriteContentInfo(
PROTOCOL_GINGLE, contents.front(), parsers, error);
if (!elem)
return false;
elems->push_back(elem);
} else if (contents.size() == 2 &&
contents.at(0).type == NS_JINGLE_RTP &&
contents.at(1).type == NS_JINGLE_RTP) {
// Special-case audio + video contents so that they are "merged"
// into one "video" content.
buzz::XmlElement* audio = WriteContentInfo(
PROTOCOL_GINGLE, contents.at(0), parsers, error);
if (!audio)
return false;
buzz::XmlElement* video = WriteContentInfo(
PROTOCOL_GINGLE, contents.at(1), parsers, error);
if (!video) {
delete audio;
return false;
}
CopyXmlChildren(audio, video);
elems->push_back(video);
delete audio;
} else {
return BadWrite("Gingle protocol may only have one content.", error);
}
return true;
}
const TransportInfo* GetTransportInfoByContentName(
const TransportInfos& tinfos, const std::string& content_name) {
for (TransportInfos::const_iterator tinfo = tinfos.begin();
tinfo != tinfos.end(); ++tinfo) {
if (content_name == tinfo->content_name) {
return &*tinfo;
}
}
return NULL;
}
bool WriteJingleContentPairs(const ContentInfos& contents,
const ContentParserMap& content_parsers,
const TransportInfos& tinfos,
const TransportParserMap& trans_parsers,
XmlElements* elems,
WriteError* error) {
for (ContentInfos::const_iterator content = contents.begin();
content != contents.end(); ++content) {
const TransportInfo* tinfo =
GetTransportInfoByContentName(tinfos, content->name);
if (!tinfo)
return BadWrite("No transport for content: " + content->name, error);
XmlElements pair_elems;
buzz::XmlElement* elem = WriteContentInfo(
PROTOCOL_JINGLE, *content, content_parsers, error);
if (!elem)
return false;
pair_elems.push_back(elem);
if (!WriteJingleTransportInfo(*tinfo, trans_parsers,
&pair_elems, error))
return false;
WriteJingleContentPair(content->name, pair_elems, elems);
}
return true;
}
bool WriteJingleGroupInfo(const ContentInfos& contents,
const ContentGroups& groups,
XmlElements* elems,
WriteError* error) {
if (!groups.empty()) {
buzz::XmlElement* pair_elem = new buzz::XmlElement(QN_JINGLE_DRAFT_GROUP);
pair_elem->SetAttr(QN_JINGLE_DRAFT_GROUP_TYPE, GROUP_TYPE_BUNDLE);
XmlElements pair_elems;
for (ContentInfos::const_iterator content = contents.begin();
content != contents.end(); ++content) {
buzz::XmlElement* child_elem =
new buzz::XmlElement(QN_JINGLE_CONTENT, false);
child_elem->SetAttr(QN_JINGLE_CONTENT_NAME, content->name);
pair_elems.push_back(child_elem);
}
AddXmlChildren(pair_elem, pair_elems);
elems->push_back(pair_elem);
}
return true;
}
bool ParseContentType(SignalingProtocol protocol,
const buzz::XmlElement* action_elem,
std::string* content_type,
ParseError* error) {
const buzz::XmlElement* content_elem;
if (protocol == PROTOCOL_GINGLE) {
if (!ParseContentType(action_elem, content_type, &content_elem, error))
return false;
// Internally, we only use NS_JINGLE_RTP.
if (*content_type == NS_GINGLE_AUDIO ||
*content_type == NS_GINGLE_VIDEO)
*content_type = NS_JINGLE_RTP;
} else {
const buzz::XmlElement* pair_elem
= action_elem->FirstNamed(QN_JINGLE_CONTENT);
if (pair_elem == NULL)
return BadParse("No contents found", error);
if (!ParseContentType(pair_elem, content_type, &content_elem, error))
return false;
// If there is more than one content type, return an error.
for (; pair_elem != NULL;
pair_elem = pair_elem->NextNamed(QN_JINGLE_CONTENT)) {
std::string content_type2;
if (!ParseContentType(pair_elem, &content_type2, &content_elem, error))
return false;
if (content_type2 != *content_type)
return BadParse("More than one content type found", error);
}
}
return true;
}
static bool ParseContentMessage(
SignalingProtocol protocol,
const buzz::XmlElement* action_elem,
bool expect_transports,
const ContentParserMap& content_parsers,
const TransportParserMap& trans_parsers,
SessionInitiate* init,
ParseError* error) {
init->owns_contents = true;
if (protocol == PROTOCOL_GINGLE) {
if (!ParseGingleContentInfos(action_elem, content_parsers,
&init->contents, error))
return false;
if (expect_transports &&
!ParseGingleTransportInfos(action_elem, init->contents, trans_parsers,
&init->transports, error))
return false;
} else {
if (!ParseJingleContentInfos(action_elem, content_parsers,
&init->contents, error))
return false;
if (!ParseJingleGroupInfos(action_elem, &init->groups, error))
return false;
if (expect_transports &&
!ParseJingleTransportInfos(action_elem, init->contents, trans_parsers,
&init->transports, error))
return false;
}
return true;
}
static bool WriteContentMessage(
SignalingProtocol protocol,
const ContentInfos& contents,
const TransportInfos& tinfos,
const ContentParserMap& content_parsers,
const TransportParserMap& transport_parsers,
const ContentGroups& groups,
XmlElements* elems,
WriteError* error) {
if (protocol == PROTOCOL_GINGLE) {
if (!WriteGingleContentInfos(contents, content_parsers, elems, error))
return false;
if (!WriteGingleTransportInfos(tinfos, transport_parsers,
elems, error))
return false;
} else {
if (!WriteJingleContentPairs(contents, content_parsers,
tinfos, transport_parsers,
elems, error))
return false;
if (!WriteJingleGroupInfo(contents, groups, elems, error))
return false;
}
return true;
}
bool ParseSessionInitiate(SignalingProtocol protocol,
const buzz::XmlElement* action_elem,
const ContentParserMap& content_parsers,
const TransportParserMap& trans_parsers,
SessionInitiate* init,
ParseError* error) {
bool expect_transports = true;
return ParseContentMessage(protocol, action_elem, expect_transports,
content_parsers, trans_parsers,
init, error);
}
bool WriteSessionInitiate(SignalingProtocol protocol,
const ContentInfos& contents,
const TransportInfos& tinfos,
const ContentParserMap& content_parsers,
const TransportParserMap& transport_parsers,
const ContentGroups& groups,
XmlElements* elems,
WriteError* error) {
return WriteContentMessage(protocol, contents, tinfos,
content_parsers, transport_parsers, groups,
elems, error);
}
bool ParseSessionAccept(SignalingProtocol protocol,
const buzz::XmlElement* action_elem,
const ContentParserMap& content_parsers,
const TransportParserMap& transport_parsers,
SessionAccept* accept,
ParseError* error) {
bool expect_transports = true;
return ParseContentMessage(protocol, action_elem, expect_transports,
content_parsers, transport_parsers,
accept, error);
}
bool WriteSessionAccept(SignalingProtocol protocol,
const ContentInfos& contents,
const TransportInfos& tinfos,
const ContentParserMap& content_parsers,
const TransportParserMap& transport_parsers,
const ContentGroups& groups,
XmlElements* elems,
WriteError* error) {
return WriteContentMessage(protocol, contents, tinfos,
content_parsers, transport_parsers, groups,
elems, error);
}
bool ParseSessionTerminate(SignalingProtocol protocol,
const buzz::XmlElement* action_elem,
SessionTerminate* term,
ParseError* error) {
if (protocol == PROTOCOL_GINGLE) {
const buzz::XmlElement* reason_elem = action_elem->FirstElement();
if (reason_elem != NULL) {
term->reason = reason_elem->Name().LocalPart();
const buzz::XmlElement *debug_elem = reason_elem->FirstElement();
if (debug_elem != NULL) {
term->debug_reason = debug_elem->Name().LocalPart();
}
}
return true;
} else {
const buzz::XmlElement* reason_elem =
action_elem->FirstNamed(QN_JINGLE_REASON);
if (reason_elem) {
reason_elem = reason_elem->FirstElement();
if (reason_elem) {
term->reason = reason_elem->Name().LocalPart();
}
}
return true;
}
}
void WriteSessionTerminate(SignalingProtocol protocol,
const SessionTerminate& term,
XmlElements* elems) {
if (protocol == PROTOCOL_GINGLE) {
elems->push_back(new buzz::XmlElement(buzz::QName(NS_GINGLE, term.reason)));
} else {
if (!term.reason.empty()) {
buzz::XmlElement* reason_elem = new buzz::XmlElement(QN_JINGLE_REASON);
reason_elem->AddElement(new buzz::XmlElement(
buzz::QName(NS_JINGLE, term.reason)));
elems->push_back(reason_elem);
}
}
}
bool ParseDescriptionInfo(SignalingProtocol protocol,
const buzz::XmlElement* action_elem,
const ContentParserMap& content_parsers,
const TransportParserMap& transport_parsers,
DescriptionInfo* description_info,
ParseError* error) {
bool expect_transports = false;
return ParseContentMessage(protocol, action_elem, expect_transports,
content_parsers, transport_parsers,
description_info, error);
}
bool ParseTransportInfos(SignalingProtocol protocol,
const buzz::XmlElement* action_elem,
const ContentInfos& contents,
const TransportParserMap& trans_parsers,
TransportInfos* tinfos,
ParseError* error) {
if (protocol == PROTOCOL_GINGLE) {
return ParseGingleTransportInfos(
action_elem, contents, trans_parsers, tinfos, error);
} else {
return ParseJingleTransportInfos(
action_elem, contents, trans_parsers, tinfos, error);
}
}
bool WriteTransportInfos(SignalingProtocol protocol,
const TransportInfos& tinfos,
const TransportParserMap& trans_parsers,
XmlElements* elems,
WriteError* error) {
if (protocol == PROTOCOL_GINGLE) {
return WriteGingleTransportInfos(tinfos, trans_parsers,
elems, error);
} else {
return WriteJingleTransportInfos(tinfos, trans_parsers,
elems, error);
}
}
bool GetUriTarget(const std::string& prefix, const std::string& str,
std::string* after) {
size_t pos = str.find(prefix);
if (pos == std::string::npos)
return false;
*after = str.substr(pos + prefix.size(), std::string::npos);
return true;
}
bool FindSessionRedirect(const buzz::XmlElement* stanza,
SessionRedirect* redirect) {
const buzz::XmlElement* error_elem = GetXmlChild(stanza, LN_ERROR);
if (error_elem == NULL)
return false;
const buzz::XmlElement* redirect_elem =
error_elem->FirstNamed(QN_GINGLE_REDIRECT);
if (redirect_elem == NULL)
redirect_elem = error_elem->FirstNamed(buzz::QN_STANZA_REDIRECT);
if (redirect_elem == NULL)
return false;
if (!GetUriTarget(STR_REDIRECT_PREFIX, redirect_elem->BodyText(),
&redirect->target))
return false;
return true;
}
} // namespace cricket<|fim▁end|>
|
AddXmlChildren(session, action_elems);
return session;
|
<|file_name|>worddisplayvm.js<|end_file_name|><|fim▁begin|>///<reference path="./otmword.ts" />
///<reference path="./wmmodules.ts" />
///<reference path="./wgenerator.ts" />
///<reference path="./ntdialog.ts" />
/**
* ViewModel used by the word creation section
*/
class WordDisplayVM {
/**
* Constructor
* @param el id of the tag to apply bindings to
* @param dict OTM-format dictionary class
* @param createSetting settings used to generate word strings
* @param equivalent translation (equivalent) data used when creating words
*/
constructor(el, dict, createSetting, equivalent) {
this.el = el;
this.data = {
dictionary: dict,
isDisabled: false,
createSetting: createSetting,
id: 1,
equivalent: equivalent,
};
this.initMethods();
}
/**
* Defines the methods used by the VM
*/
initMethods() {
this.methods = {
/**
* Creates a word string
*/
<|fim▁hole|> let form = "";
switch (this.createSetting.mode) {
case WordGenerator.SIMPLE_SYMBOL:
form = WordGenerator.simple(this.createSetting.simple);
break;
case WordGenerator.SIMPLECV_SYMBOL:
form = WordGenerator.simplecv(this.createSetting.simplecv);
break;
case WordGenerator.DEPENDENCYCV_SYMBOL:
form = WordGenerator.dependencycv(this.createSetting.dependencycv);
break;
default:
break;
}
let word = new OtmWord(this.id++, form);
word.add("");
this.dictionary.add(word);
},
/**
* Creates a word for every configured translation (equivalent)
*/
createAll: function _createAll() {
this.equivalent.equivalentsList.data.forEach((x) => {
let form = "";
switch (this.createSetting.mode) {
case WordGenerator.SIMPLE_SYMBOL:
form = WordGenerator.simple(this.createSetting.simple);
break;
case WordGenerator.SIMPLECV_SYMBOL:
form = WordGenerator.simplecv(this.createSetting.simplecv);
break;
case WordGenerator.DEPENDENCYCV_SYMBOL:
form = WordGenerator.dependencycv(this.createSetting.dependencycv);
break;
default:
break;
}
let word = new OtmWord(this.id++, form);
word.add(x.equivalents.join(","));
this.dictionary.add(word);
});
},
/**
* Removes all created words
*/
removeAll: function _removeAll() {
this.dictionary.removeAll();
// Reset the id to its initial value
this.id = 1;
},
/**
* Exports the list of created words in OTM-JSON format
*/
outputOtmJSON: function _outputOtmJSON() {
// Renumber the ids
let id = 1;
this.dictionary.words.forEach((x) => {
x.entry.id = id++;
});
WMModules.exportJSON(this.dictionary, "dict.json");
// Update the id in case word creation continues afterwards
this.id = id;
},
// Helpers used on individual words
/**
* Shows the translation-selection dialog
* @param word the word to set translations for
*/
showEquivalentDialog: function _showEquivalentDialog(word) {
this.equivalent.selectedWordId = word.entry.id.toString();
WMModules.equivalentDialog.show();
},
/**
* Removes a word
* @param word the word to remove
*/
remove: function _remove(word) {
this.dictionary.remove(word.entry.id);
},
/**
* Splits a string into an array on the "," word separator
* @param value the word's translations (comma-separated)
* @return array of trimmed strings split on commas
*/
splitter: function _splitter(value) {
return value.split(",").map(function (x) { return x.trim(); });
},
};
}
}
//# sourceMappingURL=worddisplayvm.js.map<|fim▁end|>
|
create: function _create() {
|
<|file_name|>launcher.js<|end_file_name|><|fim▁begin|>// Generated by CoffeeScript 1.8.0
var db, monitor;
db = require('./db.js');
monitor = require('./monitor.js');
<|fim▁hole|> var m;
db.createClient();
db.clearQueue();
m = monitor.createMonitor().start();
});
};<|fim▁end|>
|
module.exports.launch = function() {
require('./argv_parser.js').parse(process.argv.slice(2), function() {
|
<|file_name|>Wall.js<|end_file_name|><|fim▁begin|>"use strict";
function Wall(layer, id) {
powerupjs.GameObject.call(this, layer, id);
// Some physical properties of the wall
this.strokeColor = 'none';
this.fillColor = 'none';
this.scoreFrontColor = "#FFC800";
this.scoreSideColor = "#B28C00";
this.defaultFrontColor = '#0018FF';
this.defaultSideColor = '#0010B2';
this.frontColor = this.defaultFrontColor;
this.sideColor = this.defaultSideColor;
this._minVelocity = -250;
this._minDiameterY = 200;
this._maxDiameterY = 300;
this.diameterX = 100;
this.diameterY = 200;
this.wallWidth = this.diameterX;
this.wallThickness = this.wallWidth / 9;
this.smartWall = false;
this.smartWallRate = 2;
this.initX = powerupjs.Game.size.x + 100;
this.position = this.randomHolePosition();
this.velocity = new powerupjs.Vector2(this._minVelocity, 0);
}
Wall.prototype = Object.create(powerupjs.GameObject.prototype);
Object.defineProperty(Wall.prototype, "minDiameterY",
{
get: function () {
return this._minDiameterY;
}
});
Object.defineProperty(Wall.prototype, "maxDiameterY",
{
get: function () {
return this._maxDiameterY;
}
});
Object.defineProperty(Wall.prototype, "resetXOffset",
{
get: function () {<|fim▁hole|> return this.initX - this.diameterX / 2;
}
});
Object.defineProperty(Wall.prototype, "boundingBox",
{
get: function () {
return new powerupjs.Rectangle(this.position.x - this.diameterX / 2,
this.position.y - this.diameterY / 2,
this.diameterX, this.diameterY);
}
});
Wall.prototype.reset = function() {
var playingState = powerupjs.GameStateManager.get(ID.game_state_playing);
this.frontColor = this.defaultFrontColor;
this.sideColor = this.defaultSideColor;
this.diameterY = this.randomHoleSize();
this.position = this.randomHolePosition();
this.scored = false;
// Smart wall = moving hole
if (playingState.score.score >= playingState.initSmartWallScore )
this.smartWall = true;
else
this.smartWall = false;
};
Wall.prototype.update = function(delta) {
//GameObject.prototype.update.call(this,delta);
// Contains all playing objects
var playingState = powerupjs.GameStateManager.get(ID.game_state_playing);
// If wall goes off screen
if (playingState.isOutsideWorld(this)) {
this.reset();
//this.velocity = new Vector2(this._minVelocity - (20 * score.score), 0);
}
// Move the hole
if (this.smartWall)
this.moveHole(delta);
// Determine if user collides with wall.
if (this.position.x <= playingState.user.boundingBox.right &&
this.position.x > playingState.user.boundingBox.center.x) {
if (this.isColliding(playingState.user)) {
playingState.lives -= 1;
playingState.isDying = true;
}
// If no collision and wall is behind user, score it.
} else if (this.position.x <= playingState.user.boundingBox.center.x && !this.scored) {
playingState.score.score += 1;
this.frontColor = this.scoreFrontColor;
this.sideColor = this.scoreSideColor;
this.scored = true;
sounds.beep.play();
}
// Add moving hole
//this.position.y += 1;
};
Wall.prototype.draw = function() {
powerupjs.Canvas2D.drawPath(this.calculateWallPath('topFront'), this.frontColor, this.strokeColor);
powerupjs.Canvas2D.drawPath(this.calculateWallPath('holeLeft'), this.frontColor, this.strokeColor);
powerupjs.Canvas2D.drawPath(this.calculateWallPath('bottomFront'), this.frontColor, this.strokeColor);
powerupjs.Canvas2D.drawPath(this.calculateWallPath('holeSide'), this.sideColor, this.strokeColor);
};
Wall.prototype.moveHole = function(delta) {
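// Bounce the hole off the top/bottom screen edges by flipping the rate's sign.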
if (this.boundingBox.bottom > powerupjs.Game.size.y ||
this.boundingBox.top < 0)
this.smartWallRate *= -1;
this.position.y += this.smartWallRate;
};
Wall.prototype.isColliding = function(user) {
var userCenter = { x : user.boundingBox.center.x,
y : user.boundingBox.center.y};
var holeTop = this.position.y - this.diameterY / 2;
var holeBottom = this.position.y + this.diameterY / 2;
var overlap = this.position.x - userCenter.x;
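// Treat the user sprite as an ellipse: theta is the parametric angle where the
// wall plane crosses it, so (height / 2) * sin(theta) is the ellipse's
// half-height at that horizontal offset.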
var theta = Math.acos(overlap / (user.width / 2));
var userTop = userCenter.y - (user.height / 2) * Math.sin(theta);
var userBottom = (user.height / 2) * Math.sin(theta) + userCenter.y;
if (userTop > holeTop && userBottom < holeBottom) {
return false;
} else {
return true;
}
};
Wall.prototype.randomHoleSize = function() {
//console.log(Math.floor(Math.random() * (this.maxDiameterY - this.minDiameterY)) + this.minDiameterY);
return Math.floor(Math.random() * (this.maxDiameterY - this.minDiameterY)) + this.minDiameterY;
};
Wall.prototype.randomHolePosition = function() {
var newY = Math.random() * (powerupjs.Game.size.y - this.diameterY) +
this.diameterY / 2;
return new powerupjs.Vector2(this.initX, newY);
};
/*Wall.prototype.calculateRandomPosition = function() {
//var calcNewY = this.calculateRandomY;
var enemy = this.root.find(ID.enemy1);
if (enemy) {
console.log("here");
var newY = null;
while (! newY || (((newY - this.diameterY / 2) <= enemy.position.y + enemy.height) &&
((newY + this.diameterY / 2) >= enemy.position.y) )) {
newY = this.calculateRandomY();
console.log(newY);
}
return new powerupjs.Vector2(this.initX, newY);
} else {
return new powerupjs.Vector2(this.initX, this.calculateRandomY());
}
};*/
Wall.prototype.calculateWallPath = function(type) {
var xPoints = [];
var yPoints = [];
var pathType = [];
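// xPoints/yPoints mix scalars (straight-segment endpoints) with 3-element
// arrays (apparently [control1, control2, end] for a cubic bezier); pathType
// names the segment type used between consecutive points.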
// Default values
// Wall bounds
var thick = this.wallThickness;
var left = this.position.x - this.diameterX / 2;
var right = this.position.x + (this.diameterX / 2) + thick;
var shiftedCenter = left + (this.diameterX / 2) + thick;
var top = 0;
var bottom = powerupjs.Game.size.y;
// Circle bounds
var cBase = this.position.y + this.diameterY / 2;
var cTop = this.position.y - this.diameterY / 2;
var cLeft = shiftedCenter - this.diameterX / 2;
var cRight = shiftedCenter + this.diameterX / 2;
switch(type){
case "holeLeft" :
top = cTop - 5;
bottom = cBase + 5;
right = shiftedCenter;
xPoints = [left, left, right, right,
[cLeft, cLeft, right],
right, left];
yPoints = [top, bottom, bottom, cBase,
[cBase, cTop, cTop],
top, top];
pathType = ['line','line','line','bezierCurve','line', 'line'];
break;
case "holeRight" :
top = cTop - 1;
bottom = cBase + 1;
left = shiftedCenter;
xPoints = [left, left,
[cRight, cRight, left],
left, right, right, left];
yPoints = [top, cTop,
[cTop, cBase, cBase],
bottom, bottom, top, top];
pathType = ['line','bezierCurve','line', 'line', 'line', 'line'];
break;
case "holeSide" :
right = shiftedCenter;
xPoints = [right - thick,
[cLeft - thick, cLeft - thick, right - thick],
right,
[cLeft, cLeft, right],
right - thick];
yPoints = [cTop,
[cTop, cBase, cBase],
cBase,
[cBase, cTop, cTop],
cTop];
pathType = ['bezierCurve','line','bezierCurve','line'];
break;
case "topFront" :
bottom = cTop;
//right = shiftedCenter + 5;
xPoints = [left, left, right, right, left];
yPoints = [top, bottom, bottom, top, top];
pathType = ['line','line','line','line'];
break;
case "bottomFront" :
top = cBase;
//right = shiftedCenter + 5;
xPoints = [left, left, right, right, left];
yPoints = [top, bottom, bottom, top, top];
pathType = ['line','line','line','line'];
break;
case "rightSide" :
right = right - 1;
left = right;
right = right + thick;
xPoints = [left, left, right, right, left];
yPoints = [top, bottom, bottom, top, top];
pathType = ['line','line','line','line'];
break;
}
return { xPoints : xPoints, yPoints : yPoints,
pathType : pathType };
};<|fim▁end|>
| |
<|file_name|>issues.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import datetime
import itertools
import operator
import os
import re
import sys
try:
from lxml import etree
except ImportError:
etree = None
from . import colorize, config, source, utils
ISSUE_KIND_ERROR = 'ERROR'
ISSUE_KIND_WARNING = 'WARNING'
ISSUE_KIND_INFO = 'INFO'
ISSUE_KIND_ADVICE = 'ADVICE'
# field names in rows of json reports
JSON_INDEX_DOTTY = 'dotty'
JSON_INDEX_FILENAME = 'file'
JSON_INDEX_HASH = 'hash'
JSON_INDEX_INFER_SOURCE_LOC = 'infer_source_loc'
JSON_INDEX_ISL_FILE = 'file'
JSON_INDEX_ISL_LNUM = 'lnum'
JSON_INDEX_ISL_CNUM = 'cnum'
JSON_INDEX_ISL_ENUM = 'enum'
JSON_INDEX_KIND = 'kind'
JSON_INDEX_LINE = 'line'
JSON_INDEX_PROCEDURE = 'procedure'
JSON_INDEX_PROCEDURE_ID = 'procedure_id'
JSON_INDEX_QUALIFIER = 'qualifier'
JSON_INDEX_QUALIFIER_TAGS = 'qualifier_tags'
JSON_INDEX_TYPE = 'bug_type'
JSON_INDEX_TRACE = 'bug_trace'
JSON_INDEX_TRACE_LEVEL = 'level'
JSON_INDEX_TRACE_FILENAME = 'filename'
JSON_INDEX_TRACE_LINE = 'line_number'
JSON_INDEX_TRACE_DESCRIPTION = 'description'
JSON_INDEX_VISIBILITY = 'visibility'
ISSUE_TYPES_URL = 'http://fbinfer.com/docs/infer-issue-types.html#'
def _text_of_infer_loc(loc):
return ' ({}:{}:{}-{}:)'.format(
loc[JSON_INDEX_ISL_FILE],
loc[JSON_INDEX_ISL_LNUM],
loc[JSON_INDEX_ISL_CNUM],
loc[JSON_INDEX_ISL_ENUM],
)
def text_of_report(report):
filename = report[JSON_INDEX_FILENAME]
kind = report[JSON_INDEX_KIND]
line = report[JSON_INDEX_LINE]
error_type = report[JSON_INDEX_TYPE]
msg = report[JSON_INDEX_QUALIFIER]
infer_loc = ''
if JSON_INDEX_INFER_SOURCE_LOC in report:
infer_loc = _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
return '%s:%d: %s: %s%s\n %s' % (
filename,
line,
kind.lower(),
error_type,
infer_loc,
msg,
)
def _text_of_report_list(project_root, reports, bugs_txt_path, limit=None,
formatter=colorize.TERMINAL_FORMATTER):
n_issues = len(reports)
if n_issues == 0:
if formatter == colorize.TERMINAL_FORMATTER:
out = colorize.color(' No issues found ',
colorize.SUCCESS, formatter)
return out + '\n'
else:
return 'No issues found'
text_errors_list = []
for report in reports[:limit]:
filename = report[JSON_INDEX_FILENAME]
line = report[JSON_INDEX_LINE]
source_context = source.build_source_context(
os.path.join(project_root, filename),
formatter,
line,
)
indenter = source.Indenter() \
.indent_push() \
.add(source_context)
source_context = '\n' + unicode(indenter)
msg = text_of_report(report)
if report[JSON_INDEX_KIND] == ISSUE_KIND_ERROR:
msg = colorize.color(msg, colorize.ERROR, formatter)
elif report[JSON_INDEX_KIND] == ISSUE_KIND_WARNING:
msg = colorize.color(msg, colorize.WARNING, formatter)
elif report[JSON_INDEX_KIND] == ISSUE_KIND_ADVICE:
msg = colorize.color(msg, colorize.ADVICE, formatter)
text = '%s%s' % (msg, source_context)
text_errors_list.append(text)
error_types_count = {}<|fim▁hole|> # which assertion failed
if t == 'Assert_failure' and JSON_INDEX_INFER_SOURCE_LOC in report:
t += _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
if t not in error_types_count:
error_types_count[t] = 1
else:
error_types_count[t] += 1
max_type_length = max(map(len, error_types_count.keys())) + 2
sorted_error_types = error_types_count.items()
sorted_error_types.sort(key=operator.itemgetter(1), reverse=True)
types_text_list = map(lambda (t, count): '%s: %d' % (
t.rjust(max_type_length),
count,
), sorted_error_types)
text_errors = '\n\n'.join(text_errors_list)
if limit >= 0 and n_issues > limit:
text_errors += colorize.color(
('\n\n...too many issues to display (limit=%d exceeded), please ' +
'see %s or run `inferTraceBugs` for the remaining issues.')
% (limit, bugs_txt_path), colorize.HEADER, formatter)
issues_found = 'Found {n_issues}'.format(
n_issues=utils.get_plural('issue', n_issues),
)
msg = '{issues_found}\n\n{issues}\n\n{header}\n\n{summary}'.format(
issues_found=colorize.color(issues_found,
colorize.HEADER,
formatter),
issues=text_errors,
header=colorize.color('Summary of the reports',
colorize.HEADER, formatter),
summary='\n'.join(types_text_list),
)
return msg
def _is_user_visible(project_root, report):
kind = report[JSON_INDEX_KIND]
return kind in [ISSUE_KIND_ERROR, ISSUE_KIND_WARNING, ISSUE_KIND_ADVICE]
def print_and_save_errors(infer_out, project_root, json_report, bugs_out,
pmd_xml):
errors = utils.load_json_from_path(json_report)
errors = [e for e in errors if _is_user_visible(project_root, e)]
console_out = _text_of_report_list(project_root, errors, bugs_out,
limit=10)
utils.stdout('\n' + console_out)
plain_out = _text_of_report_list(project_root, errors, bugs_out,
formatter=colorize.PLAIN_FORMATTER)
with codecs.open(bugs_out, 'w',
encoding=config.CODESET, errors='replace') as file_out:
file_out.write(plain_out)
if pmd_xml:
xml_out = os.path.join(infer_out, config.PMD_XML_FILENAME)
with codecs.open(xml_out, 'w',
encoding=config.CODESET,
errors='replace') as file_out:
file_out.write(_pmd_xml_of_issues(errors))
def merge_reports_from_paths(report_paths):
json_data = []
for json_path in report_paths:
json_data.extend(utils.load_json_from_path(json_path))
return _sort_and_uniq_rows(json_data)
def _pmd_xml_of_issues(issues):
if etree is None:
print('ERROR: "etree" Python package not found.')
print('ERROR: You need to install it to use Infer with --pmd-xml')
sys.exit(1)
root = etree.Element('pmd')
root.attrib['version'] = '5.4.1'
root.attrib['date'] = datetime.datetime.now().isoformat()
for issue in issues:
fully_qualified_method_name = re.search(r'(.*)\(.*',
issue[JSON_INDEX_PROCEDURE_ID])
class_name = ''
package = ''
if fully_qualified_method_name is not None:
# probably Java
info = fully_qualified_method_name.groups()[0].split('.')
class_name = info[-2:-1][0]
method = info[-1]
package = '.'.join(info[0:-2])
else:
method = issue[JSON_INDEX_PROCEDURE]
file_node = etree.Element('file')
file_node.attrib['name'] = issue[JSON_INDEX_FILENAME]
violation = etree.Element('violation')
violation.attrib['begincolumn'] = '0'
violation.attrib['beginline'] = str(issue[JSON_INDEX_LINE])
violation.attrib['endcolumn'] = '0'
violation.attrib['endline'] = str(issue[JSON_INDEX_LINE] + 1)
violation.attrib['class'] = class_name
violation.attrib['method'] = method
violation.attrib['package'] = package
violation.attrib['priority'] = '1'
violation.attrib['rule'] = issue[JSON_INDEX_TYPE]
violation.attrib['ruleset'] = 'Infer Rules'
violation.attrib['externalinfourl'] = (
ISSUE_TYPES_URL + issue[JSON_INDEX_TYPE])
violation.text = issue[JSON_INDEX_QUALIFIER]
file_node.append(violation)
root.append(file_node)
return etree.tostring(root, pretty_print=True, encoding=config.CODESET)
def _sort_and_uniq_rows(l):
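# Two reports are duplicates when filename, line, hash and qualifier all
# match; sorting first lets groupby collapse each run to its first element.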
key = operator.itemgetter(JSON_INDEX_FILENAME,
JSON_INDEX_LINE,
JSON_INDEX_HASH,
JSON_INDEX_QUALIFIER)
l.sort(key=key)
groups = itertools.groupby(l, key)
# guaranteed to be at least one element in each group
return map(lambda (keys, dups): dups.next(), groups)<|fim▁end|>
|
for report in reports:
t = report[JSON_INDEX_TYPE]
# assert failures are not very informative without knowing
|
<|file_name|>cli.ts<|end_file_name|><|fim▁begin|>/// <reference path="../typings/node/node.d.ts" />
import generator = require("./sequelize-auto-ts");
import fs = require("fs");
let prompt = require("prompt");
if (process.argv.length > 2)
{
processFromCommandLines();
}
else
{
processFromPrompt();
}
function processFromCommandLines()
{
let args:Array<string> = process.argv.slice(2);
let modelFactory:boolean = false;
let i = args.indexOf('-mf');
if (i !== -1) {
modelFactory = true;
args.splice(i, 1);
}
if (args.length < 4)
{
showHelp();
process.exit(1);
}
let options:generator.GenerateOptions =
{
database: args[0],
username: args[1],
password: args[2],
targetDirectory: args[3],
modelFactory: modelFactory,
options: { host: args[4]}
};
generate(options);
}
function processFromPrompt()
{
let schema = {
properties: {
database: { description: "Database name", required: true },
username: { description: "Username", required: true },
password: { description: "Password", required: false, hidden: true },
targetDirectory: { description: "Target directory", required: true },
options: { description: "host", required: true }
}
};
prompt.start();
prompt.get(schema, function(err, result)
{
result.options = null;
generate(<generator.GenerateOptions>result);
})
}
function generate(options:generator.GenerateOptions):void
<|fim▁hole|> console.log("Username: " + options.username);
console.log("Password: <hidden>");
console.log("Target : " + options.targetDirectory);
console.log("");
if (!fs.existsSync(options.targetDirectory))
{
showHelp();
throw Error("Target directory does not exist: " + options.targetDirectory);
}
generator.generate(options, function(err)
{
if (err)
{
throw err;
}
});
}
function showHelp():void
{
// Minimal usage hint; the exact binary name is an assumption here.
console.log("Usage: sequelize-auto-ts [-mf] <database> <username> <password> <targetDirectory> [host]");
}<|fim▁end|>
|
{
console.log("Database: " + options.database);
|
<|file_name|>activeCoin.js<|end_file_name|><|fim▁begin|>import {
DASHBOARD_ACTIVE_COIN_CHANGE,
DASHBOARD_ACTIVE_COIN_BALANCE,
DASHBOARD_ACTIVE_COIN_SEND_FORM,
DASHBOARD_ACTIVE_COIN_RECEIVE_FORM,
DASHBOARD_ACTIVE_COIN_RESET_FORMS,
DASHBOARD_ACTIVE_SECTION,
DASHBOARD_ACTIVE_TXINFO_MODAL,
ACTIVE_COIN_GET_ADDRESSES,
DASHBOARD_ACTIVE_COIN_NATIVE_BALANCE,
DASHBOARD_ACTIVE_COIN_NATIVE_TXHISTORY,
DASHBOARD_ACTIVE_COIN_NATIVE_OPIDS,
DASHBOARD_ACTIVE_COIN_SENDTO,
DASHBOARD_ACTIVE_ADDRESS,
DASHBOARD_ACTIVE_COIN_GETINFO_FAILURE,
SYNCING_NATIVE_MODE,
DASHBOARD_UPDATE,
DASHBOARD_ELECTRUM_BALANCE,
DASHBOARD_ELECTRUM_TRANSACTIONS,
DASHBOARD_REMOVE_COIN,
DASHBOARD_ACTIVE_COIN_NET_PEERS,
DASHBOARD_ACTIVE_COIN_NET_TOTALS,
KV_HISTORY,
DASHBOARD_ETHEREUM_BALANCE,
DASHBOARD_ETHEREUM_TRANSACTIONS,
DASHBOARD_CLEAR_ACTIVECOIN,
} from '../actions/storeType';
// TODO: refactor current coin props copy on change
const defaults = {
native: {
coin: null,
mode: null,
send: false,
receive: false,
balance: 0,
addresses: null,
activeSection: 'default',
showTransactionInfo: false,
showTransactionInfoTxIndex: null,
txhistory: [],
opids: null,
lastSendToResponse: null,
progress: null,
rescanInProgress: false,
getinfoFetchFailures: 0,
net: {
peers: null,
totals: null,
},
},
spv: {
coin: null,
mode: null,
send: false,
receive: false,<|fim▁hole|> showTransactionInfoTxIndex: null,
txhistory: [],
lastSendToResponse: null,
},
eth: {
coin: null,
mode: null,
send: false,
receive: false,
balance: 0,
addresses: null,
activeSection: 'default',
showTransactionInfo: false,
showTransactionInfoTxIndex: null,
txhistory: [],
lastSendToResponse: null,
},
};
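// Strips any key not present in the per-mode defaults template above, so props
// from one mode never leak into a coin snapshot stored for another.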
const checkCoinObjectKeys = (obj, mode) => {
if (Object.keys(obj).length &&
mode) {
for (let key in obj) {
if (!defaults[mode].hasOwnProperty(key)) {
delete obj[key];
}
}
}
return obj;
};
export const ActiveCoin = (state = {
coins: {
native: {},
spv: {},
eth: {},
},
coin: null,
mode: null,
send: false,
receive: false,
balance: 0,
addresses: null,
activeSection: 'default',
showTransactionInfo: false,
showTransactionInfoTxIndex: null,
txhistory: [],
opids: null,
lastSendToResponse: null,
activeAddress: null,
progress: null,
rescanInProgress: false,
getinfoFetchFailures: 0,
net: {
peers: null,
totals: null,
},
}, action) => {
switch (action.type) {
case DASHBOARD_REMOVE_COIN:
delete state.coins[action.mode][action.coin];
if (state.coin === action.coin) {
return {
...state,
...defaults[action.mode],
};
} else {
return {
...state,
};
}
case DASHBOARD_ACTIVE_COIN_CHANGE:
if (state.coins[action.mode] &&
state.coins[action.mode][action.coin]) {
let _coins = state.coins;
if (action.mode === state.mode) {
const _coinData = state.coins[action.mode][action.coin];
const _coinDataToStore = checkCoinObjectKeys({
addresses: state.addresses,
coin: state.coin,
mode: state.mode,
balance: state.balance,
txhistory: state.txhistory,
send: state.send,
receive: state.receive,
showTransactionInfo: state.showTransactionInfo,
showTransactionInfoTxIndex: state.showTransactionInfoTxIndex,
activeSection: state.activeSection,
lastSendToResponse: state.lastSendToResponse,
opids: state.mode === 'native' ? state.opids : null,
activeAddress: state.activeAddress,
progress: state.mode === 'native' ? state.progress : null,
rescanInProgress: state.mode === 'native' ? state.rescanInProgress : false,
getinfoFetchFailures: state.mode === 'native' ? state.getinfoFetchFailures : 0,
net: state.mode === 'native' ? state.net : {},
}, action.mode);
if (!action.skip) {
_coins[action.mode][state.coin] = _coinDataToStore;
}
delete _coins.undefined;
return {
...state,
coins: _coins,
...checkCoinObjectKeys({
addresses: _coinData.addresses,
coin: _coinData.coin,
mode: _coinData.mode,
balance: _coinData.balance,
txhistory: _coinData.txhistory,
send: _coinData.send,
receive: _coinData.receive,
showTransactionInfo: _coinData.showTransactionInfo,
showTransactionInfoTxIndex: _coinData.showTransactionInfoTxIndex,
activeSection: _coinData.activeSection,
lastSendToResponse: _coinData.lastSendToResponse,
opids: _coinData.mode === 'native' ? _coinData.opids : null,
activeAddress: _coinData.activeAddress,
progress: _coinData.mode === 'native' ? _coinData.progress : null,
rescanInProgress: _coinData.mode === 'native' ? _coinData.rescanInProgress : false,
getinfoFetchFailures: _coinData.mode === 'native' ? _coinData.getinfoFetchFailures : 0,
net: _coinData.mode === 'native' ? _coinData.net : {},
}, _coinData.mode),
};
} else {
delete _coins.undefined;
return {
...state,
coins: state.coins,
...checkCoinObjectKeys({
coin: action.coin,
mode: action.mode,
balance: 0,
addresses: null,
txhistory: 'loading',
send: false,
receive: false,
showTransactionInfo: false,
showTransactionInfoTxIndex: null,
activeSection: 'default',
progress: null,
rescanInProgress: false,
net: {
peers: null,
totals: null,
},
}, action.mode),
};
}
} else {
if (state.coin) {
const _coinData = checkCoinObjectKeys({
addresses: state.addresses,
coin: state.coin,
mode: state.mode,
balance: state.balance,
txhistory: state.txhistory,
send: state.send,
receive: state.receive,
showTransactionInfo: state.showTransactionInfo,
showTransactionInfoTxIndex: state.showTransactionInfoTxIndex,
activeSection: state.activeSection,
lastSendToResponse: state.lastSendToResponse,
opids: state.mode === 'native' ? state.opids : null,
activeAddress: state.activeAddress,
progress: state.mode === 'native' ? state.progress : null,
rescanInProgress: state.mode === 'native' ? state.rescanInProgress : false,
getinfoFetchFailures: state.mode === 'native' ? state.getinfoFetchFailures : 0,
net: state.mode === 'native' ? state.net : {},
}, state.mode);
let _coins = state.coins;
if (!action.skip &&
_coins[action.mode]) {
_coins[action.mode][state.coin] = _coinData;
}
return {
...state,
coins: _coins,
...checkCoinObjectKeys({
coin: action.coin,
mode: action.mode,
balance: 0,
addresses: null,
txhistory: 'loading',
send: false,
receive: false,
showTransactionInfo: false,
showTransactionInfoTxIndex: null,
activeSection: 'default',
progress: null,
rescanInProgress: false,
net: {
peers: null,
totals: null,
},
}, action.mode),
};
} else {
return {
...state,
...checkCoinObjectKeys({
coin: action.coin,
mode: action.mode,
balance: 0,
addresses: null,
txhistory: 'loading',
send: false,
receive: false,
showTransactionInfo: false,
showTransactionInfoTxIndex: null,
activeSection: 'default',
progress: null,
rescanInProgress: false,
net: {
peers: null,
totals: null,
},
}, action.mode),
};
}
}
case DASHBOARD_ELECTRUM_BALANCE:
return {
...state,
balance: action.balance,
};
case DASHBOARD_ELECTRUM_TRANSACTIONS:
return {
...state,
txhistory: action.txhistory,
};
case DASHBOARD_ACTIVE_COIN_BALANCE:
return {
...state,
balance: action.balance,
};
case DASHBOARD_ACTIVE_COIN_SEND_FORM:
return {
...state,
send: action.send,
receive: false,
};
case DASHBOARD_ACTIVE_COIN_RECEIVE_FORM:
return {
...state,
send: false,
receive: action.receive,
};
case DASHBOARD_ACTIVE_COIN_RESET_FORMS:
return {
...state,
send: false,
receive: false,
};
case ACTIVE_COIN_GET_ADDRESSES:
return {
...state,
addresses: action.addresses,
};
case DASHBOARD_ACTIVE_SECTION:
return {
...state,
activeSection: action.section,
};
case DASHBOARD_ACTIVE_TXINFO_MODAL:
return {
...state,
showTransactionInfo: action.showTransactionInfo,
showTransactionInfoTxIndex: action.showTransactionInfoTxIndex,
};
case DASHBOARD_ACTIVE_COIN_NATIVE_BALANCE:
return {
...state,
balance: action.balance,
};
case DASHBOARD_ACTIVE_COIN_NATIVE_TXHISTORY:
return {
...state,
txhistory: action.txhistory,
};
case DASHBOARD_ACTIVE_COIN_NATIVE_OPIDS:
return {
...state,
opids: action.opids,
};
case DASHBOARD_ACTIVE_COIN_SENDTO:
return {
...state,
lastSendToResponse: action.lastSendToResponse,
};
case DASHBOARD_ACTIVE_ADDRESS:
return {
...state,
activeAddress: action.address,
};
case SYNCING_NATIVE_MODE:
return {
...state,
progress: state.mode === 'native' ? action.progress : null,
        getinfoFetchFailures: typeof action.progress === 'string' && action.progress.indexOf('"code":-777') > -1 ? state.getinfoFetchFailures + 1 : 0,
};
case DASHBOARD_ACTIVE_COIN_GETINFO_FAILURE:
return {
...state,
getinfoFetchFailures: state.getinfoFetchFailures + 1,
};
case DASHBOARD_UPDATE:
if (state.coin === action.coin) {
return {
...state,
opids: action.opids,
txhistory: action.txhistory,
balance: action.balance,
addresses: action.addresses,
rescanInProgress: action.rescanInProgress,
};
      }
      // explicit return avoids an unintended fall-through into the net peers case below
      return state;
case DASHBOARD_ACTIVE_COIN_NET_PEERS:
return {
...state,
net: {
peers: action.peers,
totals: state.net.totals,
},
};
case DASHBOARD_ACTIVE_COIN_NET_TOTALS:
return {
...state,
net: {
peers: state.net.peers,
totals: action.totals,
},
};
case DASHBOARD_ETHEREUM_BALANCE:
return {
...state,
balance: action.balance,
};
case DASHBOARD_ETHEREUM_TRANSACTIONS:
return {
...state,
txhistory: action.txhistory,
};
case DASHBOARD_CLEAR_ACTIVECOIN:
return {
coins: {
native: {},
spv: {},
eth: {},
},
coin: null,
mode: null,
balance: 0,
addresses: null,
txhistory: 'loading',
send: false,
receive: false,
showTransactionInfo: false,
showTransactionInfoTxIndex: null,
activeSection: 'default',
progress: null,
rescanInProgress: false,
net: {
peers: null,
totals: null,
},
getinfoFetchFailures: 0,
};
default:
return state;
}
}
export default ActiveCoin;<|fim▁end|>
|
balance: 0,
addresses: null,
activeSection: 'default',
showTransactionInfo: false,
|
<|file_name|>tensor_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tensorflow
import (
"bytes"
"fmt"
"io"
"reflect"
"testing"
)
func TestNewTensor(t *testing.T) {
var tests = []struct {
shape []int64
value interface{}
}{
{nil, bool(true)},
{nil, int8(5)},
{nil, int16(5)},
{nil, int32(5)},
{nil, int64(5)},
{nil, uint8(5)},
{nil, uint16(5)},
{nil, uint32(5)},
{nil, uint64(5)},
{nil, float32(5)},
{nil, float64(5)},
{nil, complex(float32(5), float32(6))},
{nil, complex(float64(5), float64(6))},
{nil, "a string"},
{[]int64{1}, []uint32{1}},
{[]int64{1}, []uint64{1}},
{[]int64{2}, []bool{true, false}},
{[]int64{1}, []float64{1}},
{[]int64{1}, [1]float64{1}},
{[]int64{1, 1}, [1][1]float64{{1}}},
{[]int64{1, 1, 1}, [1][1][]float64{{{1}}}},
{[]int64{1, 1, 2}, [1][][2]float64{{{1, 2}}}},
{[]int64{1, 1, 1, 1}, [1][][1][]float64{{{{1}}}}},
{[]int64{2}, []string{"string", "slice"}},
{[]int64{2}, [2]string{"string", "array"}},
{[]int64{3, 2}, [][]float64{{1, 2}, {3, 4}, {5, 6}}},
{[]int64{2, 3}, [2][3]float64{{1, 2, 3}, {3, 4, 6}}},
{[]int64{4, 3, 2}, [][][]float64{
{{1, 2}, {3, 4}, {5, 6}},
{{7, 8}, {9, 10}, {11, 12}},
{{0, -1}, {-2, -3}, {-4, -5}},
{{-6, -7}, {-8, -9}, {-10, -11}},
}},
{[]int64{2, 0}, [][]int64{{}, {}}},
{[]int64{2, 2}, [][]string{{"row0col0", "row0,col1"}, {"row1col0", "row1,col1"}}},
{[]int64{2, 3}, [2][3]string{
{"row0col0", "row0,col1", "row0,col2"},
{"row1col0", "row1,col1", "row1,col2"},
}},
}
var errorTests = []interface{}{
struct{ a int }{5},
new(int32),
new([]int32),
// native ints not supported
int(5),
[]int{5},
// Mismatched dimensions
[][]float32{{1, 2, 3}, {4}},
// Mismatched dimensions. Should return "mismatched slice lengths" error instead of "BUG"
[][][]float32{{{1, 2}, {3, 4}}, {{1}, {3}}},
// Mismatched dimensions. Should return error instead of valid tensor
[][][]float32{{{1, 2}, {3, 4}}, {{1}, {3}}, {{1, 2, 3}, {2, 3, 4}}},
// Mismatched dimensions for strings
[][]string{{"abc"}, {"abcd", "abcd"}},
}
for _, test := range tests {
tensor, err := NewTensor(test.value)
if err != nil {
t.Errorf("NewTensor(%v): %v", test.value, err)
continue
}
if !reflect.DeepEqual(test.shape, tensor.Shape()) {
t.Errorf("Tensor.Shape(): got %v, want %v", tensor.Shape(), test.shape)
}
// Test that encode and decode gives the same value. We skip arrays because
// they're returned as slices.
if reflect.TypeOf(test.value).Kind() != reflect.Array {
got := tensor.Value()
if !reflect.DeepEqual(test.value, got) {
t.Errorf("encode/decode: got %v, want %v", got, test.value)
}
}
}
for _, test := range errorTests {
tensor, err := NewTensor(test)
if err == nil {
t.Errorf("NewTensor(%v): %v", test, err)
}
if tensor != nil {
t.Errorf("NewTensor(%v) = %v, want nil", test, tensor)
}
}
}
func TestTensorSerialization(t *testing.T) {
var tests = []interface{}{
bool(true),
int8(5),
int16(5),
int32(5),
int64(5),
uint8(5),
uint16(5),
float32(5),
float64(5),
complex(float32(5), float32(6)),
complex(float64(5), float64(6)),
[]float64{1},
[][]float32{{1, 2}, {3, 4}, {5, 6}},
[][][]int8{
{{1, 2}, {3, 4}, {5, 6}},
{{7, 8}, {9, 10}, {11, 12}},
{{0, -1}, {-2, -3}, {-4, -5}},
{{-6, -7}, {-8, -9}, {-10, -11}},
},
[]bool{true, false, true},
}
for _, v := range tests {
t1, err := NewTensor(v)
if err != nil {
t.Errorf("(%v): %v", v, err)
continue
}
buf := new(bytes.Buffer)
n, err := t1.WriteContentsTo(buf)
if err != nil {
t.Errorf("(%v): %v", v, err)
continue
}
if n != int64(buf.Len()) {
t.Errorf("(%v): WriteContentsTo said it wrote %v bytes, but wrote %v", v, n, buf.Len())
}
t2, err := ReadTensor(t1.DataType(), t1.Shape(), buf)
if err != nil {
t.Errorf("(%v): %v", v, err)
continue
}
if buf.Len() != 0 {
t.Errorf("(%v): %v bytes written by WriteContentsTo not read by ReadTensor", v, buf.Len())
}
if got, want := t2.DataType(), t1.DataType(); got != want {
t.Errorf("(%v): Got %v, want %v", v, got, want)
}
if got, want := t2.Shape(), t1.Shape(); !reflect.DeepEqual(got, want) {
t.Errorf("(%v): Got %v, want %v", v, got, want)
}
if got, want := t2.Value(), v; !reflect.DeepEqual(got, want) {
t.Errorf("(%v): Got %v, want %v", v, got, want)
}
}
}
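// Round-trip sketch of the serialization API exercised above (minimal, outside
// the test harness):
//	t1, _ := NewTensor([]float32{1, 2})
//	var buf bytes.Buffer
//	n, _ := t1.WriteContentsTo(&buf) // n is the number of content bytes written
//	t2, _ := ReadTensor(t1.DataType(), t1.Shape(), &buf)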
func TestReadTensorDoesNotReadBeyondContent(t *testing.T) {
t1, _ := NewTensor(int8(7))
t2, _ := NewTensor(float32(2.718))
buf := new(bytes.Buffer)
if _, err := t1.WriteContentsTo(buf); err != nil {
t.Fatal(err)
}
if _, err := t2.WriteContentsTo(buf); err != nil {
t.Fatal(err)
}
t3, err := ReadTensor(t1.DataType(), t1.Shape(), buf)
if err != nil {
t.Fatal(err)
}
t4, err := ReadTensor(t2.DataType(), t2.Shape(), buf)
if err != nil {
t.Fatal(err)
}
if v, ok := t3.Value().(int8); !ok || v != 7 {
t.Errorf("Got (%v (%T), %v), want (7 (int8), true)", v, v, ok)
}
if v, ok := t4.Value().(float32); !ok || v != 2.718 {
t.Errorf("Got (%v (%T), %v), want (2.718 (float32), true)", v, v, ok)
}
}
func TestTensorSerializationErrors(t *testing.T) {
// String tensors cannot be serialized
t1, err := NewTensor("abcd")
if err != nil {
t.Fatal(err)
}
buf := new(bytes.Buffer)
if n, err := t1.WriteContentsTo(buf); n != 0 || err == nil || buf.Len() != 0 {
t.Errorf("Got (%v, %v, %v) want (0, <non-nil>, 0)", n, err, buf.Len())
}
// Should fail to read a truncated value.
if t1, err = NewTensor(int8(8)); err != nil {
t.Fatal(err)
}
n, err := t1.WriteContentsTo(buf)
if err != nil {
t.Fatal(err)
}
r := bytes.NewReader(buf.Bytes()[:n-1])
if _, err = ReadTensor(t1.DataType(), t1.Shape(), r); err == nil {
t.Error("ReadTensor should have failed if the tensor content was truncated")
}
}
func TestReadTensorReadAll(t *testing.T) {
// Get the bytes of a tensor.
a := []float32{1.1, 1.2, 1.3}
ats, err := NewTensor(a)
if err != nil {
t.Fatal(err)
}
abuf := new(bytes.Buffer)
if _, err := ats.WriteContentsTo(abuf); err != nil {
t.Fatal(err)
}
// Get the bytes of another tensor.
b := []float32{1.1, 1.2, 1.3}
bts, err := NewTensor(b)
if err != nil {
t.Fatal(err)
}
bbuf := new(bytes.Buffer)
if _, err := bts.WriteContentsTo(bbuf); err != nil {
t.Fatal(err)
}
// Check that ReadTensor reads all bytes of both tensors, when the situation
	// requires more than one read.
abbuf := io.MultiReader(abuf, bbuf)
abts, err := ReadTensor(Float, []int64{2, 3}, abbuf)
if err != nil {
t.Fatal(err)
}
abtsf32 := abts.Value().([][]float32)
expected := [][]float32{a, b}
if len(abtsf32) != 2 {
t.Fatalf("first dimension %d is not 2", len(abtsf32))
}
for i := 0; i < 2; i++ {
if len(abtsf32[i]) != 3 {
t.Fatalf("second dimension %d is not 3", len(abtsf32[i]))
}
for j := 0; j < 3; j++ {
if abtsf32[i][j] != expected[i][j] {
t.Errorf("value at %d %d not equal %f %f", i, j, abtsf32[i][j], expected[i][j])
}
}
}
}
func benchmarkNewTensor(b *testing.B, v interface{}) {<|fim▁hole|> }
}
}
func benchmarkValueTensor(b *testing.B, v interface{}) {
t, err := NewTensor(v)
if err != nil {
b.Fatalf("(%v, %v)", t, err)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = t.Value()
}
}
func BenchmarkTensor(b *testing.B) {
// Some sample sizes from the Inception image labeling model.
// Where input tensors correspond to a 224x224 RGB image
// flattened into a vector.
var vector [224 * 224 * 3]int32
var arrays [100][100][100]int32
l3 := make([][][]float32, 100)
l2 := make([][]float32, 100*100)
l1 := make([]float32, 100*100*100)
for i := range l2 {
l2[i] = l1[i*100 : (i+1)*100]
}
for i := range l3 {
l3[i] = l2[i*100 : (i+1)*100]
}
s1 := make([]string, 100*100*100)
s2 := make([][]string, 100*100)
s3 := make([][][]string, 100)
for i := range s1 {
s1[i] = "cheesit"
}
for i := range s2 {
s2[i] = s1[i*100 : (i+1)*100]
}
for i := range s3 {
s3[i] = s2[i*100 : (i+1)*100]
}
tests := []interface{}{
vector,
arrays,
l1,
l2,
l3,
s1,
s2,
s3,
}
b.Run("New", func(b *testing.B) {
for _, test := range tests {
b.Run(fmt.Sprintf("%T", test), func(b *testing.B) { benchmarkNewTensor(b, test) })
}
})
b.Run("Value", func(b *testing.B) {
for _, test := range tests {
b.Run(fmt.Sprintf("%T", test), func(b *testing.B) { benchmarkValueTensor(b, test) })
}
})
}<|fim▁end|>
|
b.ReportAllocs()
for i := 0; i < b.N; i++ {
if t, err := NewTensor(v); err != nil || t == nil {
b.Fatalf("(%v, %v)", t, err)
|
<|file_name|>formfields.py<|end_file_name|><|fim▁begin|># file eulcommon/djangoextras/formfields.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Custom generic form fields for use with Django forms.
----
'''
import re
from django.core.validators import RegexValidator
from django.forms import CharField, ChoiceField
from django.forms.widgets import Select, TextInput, Widget
from django.utils.safestring import mark_safe
# regular expression to validate and parse W3C dates
W3C_DATE_RE = re.compile(r'^(?P<year>\d{4})(?:-(?P<month>[0-1]\d)(?:-(?P<day>[0-3]\d))?)?$')
validate_w3c_date = RegexValidator(W3C_DATE_RE,
u'Enter a valid W3C date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
'invalid')
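# For illustration, W3C_DATE_RE accepts '2011', '2011-05' and '2011-05-31', but
# rejects '2011-5' (month must be two digits) and '05-31' (year must be four).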
class W3CDateWidget(Widget):
'''Multi-part date widget that generates three text input boxes for year,
month, and day. Expects and generates dates in any of these W3C formats,
depending on which fields are filled in: YYYY-MM-DD, YYYY-MM, or YYYY.
'''
# based in part on SelectDateWidget from django.forms.extras.widgets
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def value_from_datadict(self, data, files, name):
'''Generate a single value from multi-part form data. Constructs a W3C
date based on values that are set, leaving out day and month if they are
not present.
:param data: dictionary of data submitted by the form
:param files: - unused
:param name: base name of the form field
:returns: string value
'''
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == 'YYYY':
y = ''
if m == 'MM':
m = ''
if d == 'DD':
d = ''
date = y
if m:
date += '-%s' % m
if d:
date += '-%s' % d
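        # e.g. year='2011', month='05', day='' assembles to '2011-05'; untouched
        # 'YYYY'/'MM'/'DD' placeholders assemble to the empty string.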
return date
# TODO: split out logic so it is easier to extend and customize display
def render(self, name, value, attrs=None):
'''Render the widget as HTML inputs for display on a form.
:param name: form field base name
:param value: date value
:param attrs: - unused
:returns: HTML text with three inputs for year/month/day
'''
# expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)
year, month, day = 'YYYY', 'MM', 'DD'
if value:
# use the regular expression to pull out year, month, and day values
# if regular expression does not match, inputs will be empty
match = W3C_DATE_RE.match(value)
if match:
date_parts = match.groupdict()
year = date_parts['year']
month = date_parts['month']
day = date_parts['day']
year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };')
month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };')
day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };')
# display widget fields in YYYY-MM-DD order to match W3C date format,
# and putting required field(s) on the left
output = [year_html, month_html, day_html]
return mark_safe(u' / \n'.join(output))
def create_textinput(self, name, field, value, **extra_attrs):
'''Generate and render a :class:`django.forms.widgets.TextInput` for
a single year, month, or day input.
If size is specified in the extra attributes, it will also be used to
set the maximum length of the field.
:param name: base name of the input field
:param field: pattern for this field (used with name to generate input name)
:param value: initial value for the field
:param extra_attrs: any extra widget attributes
:returns: rendered HTML output for the text input
'''
# TODO: move id-generation logic out for re-use
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name<|fim▁hole|> extra_attrs['maxlength'] = extra_attrs['size']
local_attrs = self.build_attrs(id=field % id_, **extra_attrs)
txtinput = TextInput()
return txtinput.render(field % name, value, local_attrs)
class W3CDateField(CharField):
'''W3C date field that uses a :class:`~eulcore.django.forms.fields.W3CDateWidget`
for presentation and uses a simple regular expression to do basic validation
on the input (but does not actually test that it is a valid date).
'''
widget = W3CDateWidget
default_error_messages = {
'invalid': u'Enter a date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
}
default_validators = [validate_w3c_date]
class DynamicSelect(Select):
'''A :class:`~django.forms.widgets.Select` widget whose choices are not
static, but instead generated dynamically when referenced.
:param choices: callable; this will be called to generate choices each
time they are referenced.
'''
def __init__(self, attrs=None, choices=None):
# Skip right over Select and go to its parents. Select just sets
# self.choices, which will break since it's a property here.
super(DynamicSelect, self).__init__(attrs)
if choices is None:
choices = lambda: ()
self._choices = choices
def _get_choices(self):
return self._choices()
def _set_choices(self, choices):
self._choices = choices
choices = property(_get_choices, _set_choices)
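    # Usage sketch (the "Category" queryset is hypothetical): passing a callable
    # defers evaluation, so the option list is recomputed each time it is read:
    #   DynamicSelect(choices=lambda: [(c.pk, c.name) for c in Category.objects.all()])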
class DynamicChoiceField(ChoiceField):
'''A :class:`django.forms.ChoiceField` whose choices are not static, but
instead generated dynamically when referenced.
:param choices: callable; this will be called to generate choices each
time they are referenced
'''
widget = DynamicSelect
def __init__(self, choices=None, widget=None, *args, **kwargs):
# ChoiceField.__init__ tries to set static choices, which won't
# work since our choices are dynamic, so we're going to have to skip
# over it.
# First normalize our choices
if choices is None:
choices = lambda: ()
self._choices = choices
# Then normalize our widget, constructing it with our choices
# function if we need to construct it.
if widget is None:
widget = self.widget
if isinstance(widget, type):
widget = widget(choices=self._choices)
        # Now call super().__init__(), but bypass ChoiceField.
# ChoiceField just sets static choices manually and then calls its
# own super. We don't have static choices, so ChoiceField.__init__()
# would break if we called it. Skip over ChoiceField and go straight
# to *its* super.__init__().
super(ChoiceField, self).__init__(widget=widget, *args, **kwargs)
def _get_choices(self):
return self._choices()
def _set_choices(self, choices):
# if choices is updated, update the widget choice callable also
self._choices = choices
self.widget._choices = self._choices
choices = property(_get_choices, _set_choices)<|fim▁end|>
|
# use size to set maximum length
if 'size' in extra_attrs:
|
<|file_name|>database2.py<|end_file_name|><|fim▁begin|>from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal [email protected]'
__docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
json, uuid, hashlib, copy
from collections import defaultdict
import threading, random
from itertools import repeat
from calibre import prints, force_unicode
from calibre.ebooks.metadata import (title_sort, author_to_author_sort,
string_to_authors, authors_to_string, get_title_sort_pat)
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.library.database import LibraryDatabase
from calibre.library.field_metadata import FieldMetadata, TagsIcons
from calibre.library.schema_upgrades import SchemaUpgrade
from calibre.library.caches import ResultCache
from calibre.library.custom_columns import CustomColumns
from calibre.library.sqlite import connect, IntegrityError
from calibre.library.prefs import DBPrefs
from calibre.ebooks.metadata.book.base import Metadata
from calibre.constants import preferred_encoding, iswindows, filesystem_encoding
from calibre.ptempfile import (PersistentTemporaryFile,
base_dir, SpooledTemporaryFile)
from calibre.customize.ui import (run_plugins_on_import,
run_plugins_on_postimport)
from calibre import isbytestring
from calibre.utils.filenames import (ascii_filename, samefile,
WindowsAtomicFolderMove, hardlink_file)
from calibre.utils.date import (utcnow, now as nowf, utcfromtimestamp,
parse_only_date, UNDEFINED_DATE, parse_date)
from calibre.utils.config import prefs, tweaks, from_json, to_json
from calibre.utils.icu import sort_key, strcmp, lower
from calibre.utils.search_query_parser import saved_searches, set_saved_searches
from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.recycle_bin import delete_file, delete_tree
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.db import _get_next_series_num_for_list, _get_series_values
from calibre.db.errors import NoSuchFormat
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.db.categories import Tag, CATEGORY_SORTS
from calibre.utils.localization import (canonicalize_lang,
calibre_langcode_to_name)
copyfile = os.link if hasattr(os, 'link') else shutil.copyfile
SPOOL_SIZE = 30*1024*1024
class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'''
An ebook metadata database that stores references to ebook files on disk.
'''
PATH_LIMIT = 40 if 'win32' in sys.platform else 100
WINDOWS_LIBRARY_PATH_LIMIT = 75
@dynamic_property
def user_version(self):
doc = 'The user version of this database'
def fget(self):
return self.conn.get('pragma user_version;', all=False)
def fset(self, val):
self.conn.execute('pragma user_version=%d'%int(val))
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
@dynamic_property
def library_id(self):
doc = ('The UUID for this library. As long as the user only operates'
' on libraries with calibre, it will be unique')
def fget(self):
if self._library_id_ is None:
ans = self.conn.get('SELECT uuid FROM library_id', all=False)
if ans is None:
ans = str(uuid.uuid4())
self.library_id = ans
else:
self._library_id_ = ans
return self._library_id_
def fset(self, val):
self._library_id_ = unicode(val)
self.conn.executescript('''
DELETE FROM library_id;
INSERT INTO library_id (uuid) VALUES ("%s");
'''%self._library_id_)
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
def connect(self):
if iswindows and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%(259-4*self.PATH_LIMIT-10))
exists = os.path.exists(self.dbpath)
if not exists:
# Be more strict when creating new libraries as the old calculation
# allowed for max path lengths of 265 chars.
if (iswindows and len(self.library_path) >
self.WINDOWS_LIBRARY_PATH_LIMIT):
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%self.WINDOWS_LIBRARY_PATH_LIMIT)
self.conn = connect(self.dbpath, self.row_factory)
if exists and self.user_version == 0:
self.conn.close()
os.remove(self.dbpath)
self.conn = connect(self.dbpath, self.row_factory)
if self.user_version == 0:
self.initialize_database()
# remember to add any filter to the connect method in sqlite.py as well
# so that various code that connects directly will not complain about
# missing functions
self.books_list_filter = self.conn.create_dynamic_filter('books_list_filter')
# Store temporary tables in memory
self.conn.execute('pragma temp_store=2')
self.conn.commit()
@classmethod
def exists_at(cls, path):
return path and os.path.exists(os.path.join(path, 'metadata.db'))
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False, is_second_db=False, progress_callback=None,
restore_all_prefs=False):
self.is_second_db = is_second_db
try:
if isbytestring(library_path):
library_path = library_path.decode(filesystem_encoding)
except:
traceback.print_exc()
self.field_metadata = FieldMetadata()
self.format_filename_cache = defaultdict(dict)
self._library_id_ = None
# Create the lock to be used to guard access to the metadata writer
# queues. This must be an RLock, not a Lock
self.dirtied_lock = threading.RLock()
if not os.path.exists(library_path):
os.makedirs(library_path)
self.listeners = set([])
self.library_path = os.path.abspath(library_path)
self.row_factory = row_factory
self.dbpath = os.path.join(library_path, 'metadata.db')
self.dbpath = os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH',
self.dbpath)
if read_only and os.path.exists(self.dbpath):
# Work on only a copy of metadata.db to ensure that
# metadata.db is not changed
pt = PersistentTemporaryFile('_metadata_ro.db')
pt.close()
shutil.copyfile(self.dbpath, pt.name)
self.dbpath = pt.name
apply_default_prefs = not os.path.exists(self.dbpath)
self.connect()
self.is_case_sensitive = (not iswindows and
not os.path.exists(self.dbpath.replace('metadata.db',
'MeTAdAtA.dB')))
SchemaUpgrade.__init__(self)
# Guarantee that the library_id is set
self.library_id
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
if apply_default_prefs and default_prefs is not None:
if progress_callback is None:
progress_callback = lambda x, y: True
dbprefs = DBPrefs(self)
progress_callback(None, len(default_prefs))
for i, key in enumerate(default_prefs):
# be sure that prefs not to be copied are listed below
if not restore_all_prefs and key in frozenset(['news_to_be_synced']):
continue
dbprefs[key] = default_prefs[key]
progress_callback(_('restored preference ') + key, i+1)
if 'field_metadata' in default_prefs:
fmvals = [f for f in default_prefs['field_metadata'].values() if f['is_custom']]
progress_callback(None, len(fmvals))
for i, f in enumerate(fmvals):
progress_callback(_('creating custom column ') + f['label'], i)
self.create_custom_column(f['label'], f['name'], f['datatype'],
f['is_multiple'] is not None and len(f['is_multiple']) > 0,
f['is_editable'], f['display'])
self.initialize_template_cache()
self.initialize_dynamic()
def initialize_template_cache(self):
self.formatter_template_cache = {}
def get_property(self, idx, index_is_id=False, loc=-1):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
return row[loc]
def initialize_dynamic(self):
self.field_metadata = FieldMetadata() # Ensure we start with a clean copy
self.prefs = DBPrefs(self)
defs = self.prefs.defaults
defs['gui_restriction'] = defs['cs_restriction'] = ''
defs['categories_using_hierarchy'] = []
defs['column_color_rules'] = []
defs['column_icon_rules'] = []
defs['grouped_search_make_user_categories'] = []
defs['similar_authors_search_key'] = 'authors'
defs['similar_authors_match_kind'] = 'match_any'
defs['similar_publisher_search_key'] = 'publisher'
defs['similar_publisher_match_kind'] = 'match_any'
defs['similar_tags_search_key'] = 'tags'
defs['similar_tags_match_kind'] = 'match_all'
defs['similar_series_search_key'] = 'series'
defs['similar_series_match_kind'] = 'match_any'
defs['book_display_fields'] = [
('title', False), ('authors', True), ('formats', True),
('series', True), ('identifiers', True), ('tags', True),
('path', True), ('publisher', False), ('rating', False),
('author_sort', False), ('sort', False), ('timestamp', False),
('uuid', False), ('comments', True), ('id', False), ('pubdate', False),
('last_modified', False), ('size', False), ('languages', False),
]
defs['virtual_libraries'] = {}
defs['virtual_lib_on_startup'] = defs['cs_virtual_lib_on_startup'] = ''
# Migrate the bool tristate tweak
defs['bools_are_tristate'] = \
tweaks.get('bool_custom_columns_are_tristate', 'yes') == 'yes'
if self.prefs.get('bools_are_tristate') is None:
self.prefs.set('bools_are_tristate', defs['bools_are_tristate'])
# Migrate column coloring rules
if self.prefs.get('column_color_name_1', None) is not None:
from calibre.library.coloring import migrate_old_rule
old_rules = []
for i in range(1, 6):
col = self.prefs.get('column_color_name_'+str(i), None)
templ = self.prefs.get('column_color_template_'+str(i), None)
if col and templ:
try:
del self.prefs['column_color_name_'+str(i)]
rules = migrate_old_rule(self.field_metadata, templ)
for templ in rules:
old_rules.append((col, templ))
except:
pass
if old_rules:
self.prefs['column_color_rules'] += old_rules
# Migrate saved search and user categories to db preference scheme
def migrate_preference(key, default):
oldval = prefs[key]
if oldval != default:
self.prefs[key] = oldval
prefs[key] = default
if key not in self.prefs:
self.prefs[key] = default
migrate_preference('user_categories', {})
migrate_preference('saved_searches', {})
if not self.is_second_db:
set_saved_searches(self, 'saved_searches')
# migrate grouped_search_terms
if self.prefs.get('grouped_search_terms', None) is None:
try:
ogst = tweaks.get('grouped_search_terms', {})
ngst = {}
for t in ogst:
ngst[icu_lower(t)] = ogst[t]
self.prefs.set('grouped_search_terms', ngst)
except:
pass
# migrate the gui_restriction preference to a virtual library
gr_pref = self.prefs.get('gui_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['gui_restriction'] = ''
self.prefs['virtual_lib_on_startup'] = gr_pref
# migrate the cs_restriction preference to a virtual library
gr_pref = self.prefs.get('cs_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['cs_restriction'] = ''
self.prefs['cs_virtual_lib_on_startup'] = gr_pref
# Rename any user categories with names that differ only in case
user_cats = self.prefs.get('user_categories', [])
catmap = {}
for uc in user_cats:
ucl = icu_lower(uc)
if ucl not in catmap:
catmap[ucl] = []
catmap[ucl].append(uc)
cats_changed = False
for uc in catmap:
if len(catmap[uc]) > 1:
prints('found user category case overlap', catmap[uc])
cat = catmap[uc][0]
suffix = 1
while icu_lower((cat + unicode(suffix))) in catmap:
suffix += 1
prints('Renaming user category %s to %s'%(cat, cat+unicode(suffix)))
user_cats[cat + unicode(suffix)] = user_cats[cat]
del user_cats[cat]
cats_changed = True
if cats_changed:
self.prefs.set('user_categories', user_cats)
if not self.is_second_db:
load_user_template_functions(self.prefs.get('user_template_functions', []))
# Load the format filename cache
self.refresh_format_cache()
self.conn.executescript('''
DROP TRIGGER IF EXISTS author_insert_trg;
CREATE TEMP TRIGGER author_insert_trg
AFTER INSERT ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
END;
DROP TRIGGER IF EXISTS author_update_trg;
CREATE TEMP TRIGGER author_update_trg
BEFORE UPDATE ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name)
WHERE id=NEW.id AND name <> NEW.name;
END;
''')
self.conn.execute(
'UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL')
self.conn.executescript(u'''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.executescript(u'''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_filtered_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id and books_list_filter(book)) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.commit()
CustomColumns.__init__(self)
template = '''\
(SELECT {query} FROM books_{table}_link AS link INNER JOIN
{table} ON(link.{link_col}={table}.id) WHERE link.book=books.id)
{col}
'''
columns = ['id', 'title',
# col table link_col query
('authors', 'authors', 'author', 'sortconcat(link.id, name)'),
'timestamp',
'(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size',
('rating', 'ratings', 'rating', 'ratings.rating'),
('tags', 'tags', 'tag', 'group_concat(name)'),
'(SELECT text FROM comments WHERE book=books.id) comments',
('series', 'series', 'series', 'name'),
('publisher', 'publishers', 'publisher', 'name'),
'series_index',
'sort',
'author_sort',
'(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
'path',
'pubdate',
'uuid',
'has_cover',
('au_map', 'authors', 'author',
'aum_sortconcat(link.id, authors.name, authors.sort, authors.link)'),
'last_modified',
'(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
('languages', 'languages', 'lang_code',
'sortconcat(link.id, languages.lang_code)'),
]
lines = []
for col in columns:
line = col
if isinstance(col, tuple):
line = template.format(col=col[0], table=col[1],
link_col=col[2], query=col[3])
lines.append(line)
custom_map = self.custom_columns_in_meta()
# custom col labels are numbers (the id in the custom_columns table)
custom_cols = list(sorted(custom_map.keys()))
lines.extend([custom_map[x] for x in custom_cols])
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
'au_map':18, 'last_modified':19, 'identifiers':20, 'languages':21}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(self.FIELD_MAP.values())
for col in custom_cols:
self.FIELD_MAP[col] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label'],
base,
prefer_custom=True)
if self.custom_column_num_map[col]['datatype'] == 'series':
# account for the series index column. Field_metadata knows that
# the series index is one larger than the series. If you change
# it here, be sure to change it there as well.
self.FIELD_MAP[str(col)+'_index'] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label']+'_index',
base,
prefer_custom=True)
self.FIELD_MAP['ondevice'] = base = base+1
self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
self.FIELD_MAP['marked'] = base = base+1
self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)
self.FIELD_MAP['series_sort'] = base = base+1
self.field_metadata.set_field_record_index('series_sort', base, prefer_custom=False)
script = '''
DROP VIEW IF EXISTS meta2;
CREATE TEMP VIEW meta2 AS
SELECT
{0}
FROM books;
'''.format(', \n'.join(lines))
self.conn.executescript(script)
self.conn.commit()
# Reconstruct the user categories, putting them into field_metadata
# Assumption is that someone else will fix them if they change.
self.field_metadata.remove_dynamic_categories()
for user_cat in sorted(self.prefs.get('user_categories', {}).keys(), key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
self.field_metadata.add_user_category(label=cat_name, name=user_cat)
# add grouped search term user categories
muc = self.prefs.get('grouped_search_make_user_categories', [])
for cat in sorted(self.prefs.get('grouped_search_terms', {}).keys(), key=sort_key):
if cat in muc:
# There is a chance that these can be duplicates of an existing
# user category. Print the exception and continue.
try:
self.field_metadata.add_user_category(label=u'@' + cat, name=cat)
except:
traceback.print_exc()
if len(saved_searches().names()):
self.field_metadata.add_search_category(label='search', name=_('Searches'))
self.field_metadata.add_grouped_search_terms(
self.prefs.get('grouped_search_terms', {}))
self.book_on_device_func = None
self.data = ResultCache(self.FIELD_MAP, self.field_metadata, db_prefs=self.prefs)
self.search = self.data.search
self.search_getting_ids = self.data.search_getting_ids
self.refresh = functools.partial(self.data.refresh, self)
self.sort = self.data.sort
self.multisort = self.data.multisort
self.index = self.data.index
self.refresh_ids = functools.partial(self.data.refresh_ids, self)
self.row = self.data.row
self.has_id = self.data.has_id
self.count = self.data.count
self.set_marked_ids = self.data.set_marked_ids
for prop in (
'author_sort', 'authors', 'comment', 'comments',
'publisher', 'rating', 'series', 'series_index', 'tags',
'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
'metadata_last_modified', 'languages',
):
fm = {'comment':'comments', 'metadata_last_modified':
'last_modified'}.get(prop, prop)
setattr(self, prop, functools.partial(self.get_property,
loc=self.FIELD_MAP[fm]))
setattr(self, 'title_sort', functools.partial(self.get_property,
loc=self.FIELD_MAP['sort']))
d = self.conn.get('SELECT book FROM metadata_dirtied', all=True)
with self.dirtied_lock:
self.dirtied_sequence = 0
self.dirtied_cache = {}
for x in d:
self.dirtied_cache[x[0]] = self.dirtied_sequence
self.dirtied_sequence += 1
self.refresh_ondevice = functools.partial(self.data.refresh_ondevice, self)
self.refresh()
self.last_update_check = self.last_modified()
def break_cycles(self):
self.data.break_cycles()
self.data = self.field_metadata = self.prefs = self.listeners = \
self.refresh_ondevice = None
def initialize_database(self):
metadata_sqlite = P('metadata_sqlite.sql', data=True,
allow_user_override=False).decode('utf-8')
self.conn.executescript(metadata_sqlite)
self.conn.commit()
if self.user_version == 0:
self.user_version = 1
def last_modified(self):
''' Return last modified time as a UTC datetime object'''
return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
def refresh_format_cache(self):
self.format_filename_cache = defaultdict(dict)
for book_id, fmt, name in self.conn.get(
'SELECT book,format,name FROM data'):
self.format_filename_cache[book_id][fmt.upper() if fmt else ''] = name
self.format_metadata_cache = defaultdict(dict)
def check_if_modified(self):
if self.last_modified() > self.last_update_check:
self.refresh()
self.refresh_format_cache()
self.last_update_check = utcnow()
def path(self, index, index_is_id=False):
        "Return the relative path to the directory containing this book's files as a unicode string."
row = self.data._data[index] if index_is_id else self.data[index]
return row[self.FIELD_MAP['path']].replace('/', os.sep)
def abspath(self, index, index_is_id=False, create_dirs=True):
        "Return the absolute path to the directory containing this book's files as a unicode string."
path = os.path.join(self.library_path, self.path(index, index_is_id=index_is_id))
if create_dirs and not os.path.exists(path):
os.makedirs(path)
return path
def construct_path_name(self, id):
'''
Construct the directory name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT].decode('ascii', 'replace')
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT].decode('ascii', 'replace')
while author[-1] in (' ', '.'):
author = author[:-1]
if not author:
author = ascii_filename(_('Unknown')).decode(
'ascii', 'replace')
path = author + '/' + title + ' (%d)'%id
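        # e.g. id 42, author u'Jane Doe', title u'Example' -> u'Jane Doe/Example (42)'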
return path
def construct_file_name(self, id):
'''
Construct the file name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT].decode('ascii', 'replace')
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT].decode('ascii', 'replace')
name = title + ' - ' + author
while name.endswith('.'):
name = name[:-1]
return name
def rmtree(self, path, permanent=False):
if not self.normpath(self.library_path).startswith(self.normpath(path)):
delete_tree(path, permanent=permanent)
def normpath(self, path):
path = os.path.abspath(os.path.realpath(path))
if not self.is_case_sensitive:
path = os.path.normcase(path).lower()
return path
def set_path(self, index, index_is_id=False):
'''
Set the path to the directory containing this books files based on its
current title and author. If there was a previous directory, its contents
are copied and it is deleted.
'''
id = index if index_is_id else self.id(index)
path = self.construct_path_name(id)
current_path = self.path(id, index_is_id=True).replace(os.sep, '/')
formats = self.formats(id, index_is_id=True)
formats = formats.split(',') if formats else []
# Check if the metadata used to construct paths has changed
fname = self.construct_file_name(id)
changed = False
for format in formats:
name = self.format_filename_cache[id].get(format.upper(), None)
if name and name != fname:
changed = True
break
if path == current_path and not changed:
return
spath = os.path.join(self.library_path, *current_path.split('/'))
tpath = os.path.join(self.library_path, *path.split('/'))
source_ok = current_path and os.path.exists(spath)
wam = WindowsAtomicFolderMove(spath) if iswindows and source_ok else None
try:
if not os.path.exists(tpath):
os.makedirs(tpath)
if source_ok: # Migrate existing files
self.copy_cover_to(id, os.path.join(tpath, 'cover.jpg'),
index_is_id=True, windows_atomic_move=wam,
use_hardlink=True)
for format in formats:
copy_function = functools.partial(self.copy_format_to, id,
format, index_is_id=True, windows_atomic_move=wam,
use_hardlink=True)
try:
self.add_format(id, format, None, index_is_id=True,
path=tpath, notify=False, copy_function=copy_function)
except NoSuchFormat:
continue
self.conn.execute('UPDATE books SET path=? WHERE id=?', (path, id))
self.dirtied([id], commit=False)
self.conn.commit()
self.data.set(id, self.FIELD_MAP['path'], path, row_is_id=True)
# Delete not needed directories
if source_ok:
if not samefile(spath, tpath):
if wam is not None:
wam.delete_originals()
self.rmtree(spath, permanent=True)
parent = os.path.dirname(spath)
if len(os.listdir(parent)) == 0:
self.rmtree(parent, permanent=True)
finally:
if wam is not None:
wam.close_handles()
curpath = self.library_path
c1, c2 = current_path.split('/'), path.split('/')
if not self.is_case_sensitive and len(c1) == len(c2):
# On case-insensitive systems, title and author renames that only
# change case don't cause any changes to the directories in the file
# system. This can lead to having the directory names not match the
# title/author, which leads to trouble when libraries are copied to
# a case-sensitive system. The following code attempts to fix this
# by checking each segment. If they are different because of case,
# then rename the segment to some temp file name, then rename it
# back to the correct name. Note that the code above correctly
# handles files in the directories, so no need to do them here.
for oldseg, newseg in zip(c1, c2):
if oldseg.lower() == newseg.lower() and oldseg != newseg:
try:
os.rename(os.path.join(curpath, oldseg),
os.path.join(curpath, newseg))
except:
break # Fail silently since nothing catastrophic has happened
curpath = os.path.join(curpath, newseg)
def add_listener(self, listener):
'''
Add a listener. Will be called on change events with two arguments.
Event name and list of affected ids.
'''
self.listeners.add(listener)
def notify(self, event, ids=[]):
'Notify all listeners'
for listener in self.listeners:
try:
listener(event, ids)
except:
traceback.print_exc()
continue
def cover(self, index, index_is_id=False, as_file=False, as_image=False,
as_path=False):
'''
Return the cover image as a bytestring (in JPEG format) or None.
WARNING: Using as_path will copy the cover to a temp file and return
the path to the temp file. You should delete the temp file when you are
done with it.
:param as_file: If True return the image as an open file object (a SpooledTemporaryFile)
:param as_image: If True return the image as a QImage object
'''
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
if as_path:
pt = PersistentTemporaryFile('_dbcover.jpg')
with pt:
shutil.copyfileobj(f, pt)
return pt.name
if as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
shutil.copyfileobj(f, ret)
ret.seek(0)
else:
ret = f.read()
if as_image:
from PyQt4.Qt import QImage
i = QImage()
i.loadFromData(ret)
ret = i
return ret
def cover_last_modified(self, index, index_is_id=False):
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
try:
return utcfromtimestamp(os.stat(path).st_mtime)
except:
# Cover doesn't exist
pass
return self.last_modified()
### The field-style interface. These use field keys.
def get_field(self, idx, key, default=None, index_is_id=False):
mi = self.get_metadata(idx, index_is_id=index_is_id,
get_cover=key == 'cover')
return mi.get(key, default)
def standard_field_keys(self):
return self.field_metadata.standard_field_keys()
def custom_field_keys(self, include_composites=True):
return self.field_metadata.custom_field_keys(include_composites)
def all_field_keys(self):
return self.field_metadata.all_field_keys()
def sortable_field_keys(self):
return self.field_metadata.sortable_field_keys()
def searchable_fields(self):
return self.field_metadata.searchable_field_keys()
def search_term_to_field_key(self, term):
return self.field_metadata.search_term_to_key(term)
def custom_field_metadata(self, include_composites=True):
return self.field_metadata.custom_field_metadata(include_composites)
def all_metadata(self):
return self.field_metadata.all_metadata()
def metadata_for_field(self, key):
return self.field_metadata[key]
def clear_dirtied(self, book_id, sequence):
'''
Clear the dirtied indicator for the books. This is used when fetching
metadata, creating an OPF, and writing a file are separated into steps.
The last step is clearing the indicator
'''
with self.dirtied_lock:
dc_sequence = self.dirtied_cache.get(book_id, None)
# print 'clear_dirty: check book', book_id, dc_sequence
if dc_sequence is None or sequence is None or dc_sequence == sequence:
# print 'needs to be cleaned'
self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
(book_id,))
self.conn.commit()
try:
del self.dirtied_cache[book_id]
except:
pass
elif dc_sequence is not None:
# print 'book needs to be done again'
pass
def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
commit=True, callback=None):
'''
Write metadata for each record to an individual OPF file. If callback
is not None, it is called once at the start with the number of book_ids
        being processed, and then once for every book_id, with arguments (book_id,
        mi, ok).
'''
if book_ids is None:
book_ids = [x[0] for x in self.conn.get(
'SELECT book FROM metadata_dirtied', all=True)]
if callback is not None:
book_ids = tuple(book_ids)
callback(len(book_ids), True, False)
for book_id in book_ids:
if not self.data.has_id(book_id):
if callback is not None:
callback(book_id, None, False)
continue
path, mi, sequence = self.get_metadata_for_dump(book_id)
if path is None:
if callback is not None:
callback(book_id, mi, False)
continue
try:
raw = metadata_to_opf(mi)
with lopen(path, 'wb') as f:
f.write(raw)
if remove_from_dirtied:
self.clear_dirtied(book_id, sequence)
except:
pass
if callback is not None:
callback(book_id, mi, True)
if commit:
self.conn.commit()
def update_last_modified(self, book_ids, commit=False, now=None):
if now is None:
now = nowf()
if book_ids:
self.conn.executemany(
'UPDATE books SET last_modified=? WHERE id=?',
[(now, book) for book in book_ids])
for book_id in book_ids:
self.data.set(book_id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if commit:
self.conn.commit()
def dirtied(self, book_ids, commit=True):
self.update_last_modified(book_ids)
for book in book_ids:
with self.dirtied_lock:
# print 'dirtied: check id', book
if book in self.dirtied_cache:
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
continue
# print 'book not already dirty'
self.conn.execute(
'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
(book,))
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
# If the commit doesn't happen, then the DB table will be wrong. This
# could lead to a problem because on restart, we won't put the book back
# into the dirtied_cache. We deal with this by writing the dirtied_cache
# back to the table on GUI exit. Not perfect, but probably OK
if book_ids and commit:
self.conn.commit()
def get_a_dirtied_book(self):
with self.dirtied_lock:
l = len(self.dirtied_cache)
if l > 0:
# The random stuff is here to prevent a single book from
# blocking progress if its metadata cannot be written for some
# reason.
id_ = self.dirtied_cache.keys()[random.randint(0, l-1)]
sequence = self.dirtied_cache[id_]
return (id_, sequence)
return (None, None)
def dirty_queue_length(self):
return len(self.dirtied_cache)
def commit_dirty_cache(self):
'''
Set the dirty indication for every book in the cache. The vast majority
of the time, the indication will already be set. However, sometimes
exceptions may have prevented a commit, which may remove some dirty
indications from the DB. This call will put them back. Note that there
is no problem with setting a dirty indication for a book that isn't in
fact dirty. Just wastes a few cycles.
'''
with self.dirtied_lock:
book_ids = list(self.dirtied_cache.keys())
self.dirtied_cache = {}
self.dirtied(book_ids)
def get_metadata_for_dump(self, idx):
path, mi = (None, None)
# get the current sequence number for this book to pass back to the
# backup thread. This will avoid double calls in the case where the
# thread has not done the work between the put and the get_metadata
with self.dirtied_lock:
sequence = self.dirtied_cache.get(idx, None)
# print 'get_md_for_dump', idx, sequence
try:
# While a book is being created, the path is empty. Don't bother to
# try to write the opf, because it will go to the wrong folder.
if self.path(idx, index_is_id=True):
path = os.path.join(self.abspath(idx, index_is_id=True), 'metadata.opf')
mi = self.get_metadata(idx, index_is_id=True)
# Always set cover to cover.jpg. Even if cover doesn't exist,
# no harm done. This way no need to call dirtied when
# cover is set/removed
mi.cover = 'cover.jpg'
except:
# This almost certainly means that the book has been deleted while
# the backup operation sat in the queue.
pass
return (path, mi, sequence)
def get_metadata(self, idx, index_is_id=False, get_cover=False,
get_user_categories=True, cover_as_data=False):
'''
Convenience method to return metadata as a :class:`Metadata` object.
Note that the list of formats is not verified.
'''
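        # Usage sketch: get_metadata(book_id, index_is_id=True, get_cover=True,
        # cover_as_data=True) returns a Metadata object whose mi.cover_data is
        # ('jpeg', <bytes>) when a cover file exists.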
idx = idx if index_is_id else self.id(idx)
try:
row = self.data._data[idx]
except:
row = None
if row is None:
raise ValueError('No book with id: %d'%idx)
fm = self.FIELD_MAP
mi = Metadata(None, template_cache=self.formatter_template_cache)
aut_list = row[fm['au_map']]
if aut_list:
aut_list = [p.split(':::') for p in aut_list.split(':#:') if p]
else:
aut_list = []
aum = []
aus = {}
aul = {}
try:
for (author, author_sort, link) in aut_list:
aut = author.replace('|', ',')
aum.append(aut)
aus[aut] = author_sort.replace('|', ',')
aul[aut] = link
except ValueError:
# Author has either ::: or :#: in it
for x in row[fm['authors']].split(','):
aum.append(x.replace('|', ','))
aul[aum[-1]] = ''
aus[aum[-1]] = aum[-1]
mi.title = row[fm['title']]
mi.authors = aum
mi.author_sort = row[fm['author_sort']]
mi.author_sort_map = aus
mi.author_link_map = aul
mi.comments = row[fm['comments']]
mi.publisher = row[fm['publisher']]
mi.timestamp = row[fm['timestamp']]
mi.pubdate = row[fm['pubdate']]
mi.uuid = row[fm['uuid']]
mi.title_sort = row[fm['sort']]
mi.book_size = row[fm['size']]
mi.ondevice_col= row[fm['ondevice']]
mi.last_modified = row[fm['last_modified']]
formats = row[fm['formats']]
mi.format_metadata = {}
if not formats:
good_formats = None
else:
formats = sorted(formats.split(','))
mi.format_metadata = FormatMetadata(self, idx, formats)
good_formats = FormatsList(formats, mi.format_metadata)
mi.formats = good_formats
mi.db_approx_formats = formats
tags = row[fm['tags']]
if tags:
mi.tags = [i.strip() for i in tags.split(',')]
languages = row[fm['languages']]
if languages:
mi.languages = [i.strip() for i in languages.split(',')]
mi.series = row[fm['series']]
if mi.series:
mi.series_index = row[fm['series_index']]
mi.rating = row[fm['rating']]
mi.set_identifiers(self.get_identifiers(idx, index_is_id=True))
mi.application_id = idx
mi.id = idx
mi.set_all_user_metadata(self.field_metadata.custom_field_metadata())
for key, meta in self.field_metadata.custom_iteritems():
if meta['datatype'] == 'composite':
mi.set(key, val=row[meta['rec_index']])
else:
val, extra = self.get_custom_and_extra(idx, label=meta['label'],
index_is_id=True)
mi.set(key, val=val, extra=extra)
user_cats = self.prefs['user_categories']
user_cat_vals = {}
if get_user_categories:
for ucat in user_cats:
res = []
for name,cat,ign in user_cats[ucat]:
v = mi.get(cat, None)
if isinstance(v, list):
if name in v:
res.append([name,cat])
elif name == v:
res.append([name,cat])
user_cat_vals[ucat] = res
mi.user_categories = user_cat_vals
if get_cover:
if cover_as_data:
cdata = self.cover(idx, index_is_id=True)
if cdata:
mi.cover_data = ('jpeg', cdata)
else:
mi.cover = self.cover(idx, index_is_id=True, as_path=True)
mi.has_cover = _('Yes') if self.has_cover(idx) else ''
return mi
def has_book(self, mi):
title = mi.title
if title:
if not isinstance(title, unicode):
title = title.decode(preferred_encoding, 'replace')
return bool(self.conn.get('SELECT id FROM books where title=?', (title,), all=False))
return False
def has_id(self, id_):
return self.data._data[id_] is not None
def books_with_same_title(self, mi, all_matches=True):
title = mi.title
ans = set()
if title:
title = lower(force_unicode(title))
for book_id in self.all_ids():
x = self.title(book_id, index_is_id=True)
if lower(x) == title:
ans.add(book_id)
if not all_matches:
break
return ans
def find_identical_books(self, mi):
fuzzy_title_patterns = [(re.compile(pat, re.IGNORECASE) if
isinstance(pat, basestring) else pat, repl) for pat, repl in
[
(r'[\[\](){}<>\'";,:#]', ''),
(get_title_sort_pat(), ''),
(r'[-._]', ' '),
(r'\s+', ' ')
]
]
def fuzzy_title(title):
title = title.strip().lower()
for pat, repl in fuzzy_title_patterns:
title = pat.sub(repl, title)
return title
identical_book_ids = set([])
if mi.authors:
try:
quathors = mi.authors[:10] # Too many authors causes parsing of
# the search expression to fail
query = u' and '.join([u'author:"=%s"'%(a.replace('"', '')) for a in
quathors])
qauthors = mi.authors[10:]
except ValueError:
return identical_book_ids
try:
book_ids = self.data.parse(query)
except:
traceback.print_exc()
return identical_book_ids
if qauthors and book_ids:
matches = set()
qauthors = {lower(x) for x in qauthors}
for book_id in book_ids:
aut = self.authors(book_id, index_is_id=True)
if aut:
aut = {lower(x.replace('|', ',')) for x in
aut.split(',')}
if aut.issuperset(qauthors):
matches.add(book_id)
book_ids = matches
for book_id in book_ids:
fbook_title = self.title(book_id, index_is_id=True)
fbook_title = fuzzy_title(fbook_title)
mbook_title = fuzzy_title(mi.title)
if fbook_title == mbook_title:
identical_book_ids.add(book_id)
return identical_book_ids
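    # Illustrative sketch of the fuzzy matching above: both
    # 'The Book_Title: (2nd ed)' and 'Book-Title 2nd ed' normalize to
    # 'book title 2nd ed', so titles differing only in punctuation, leading
    # articles and separator characters compare as identical.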
def remove_cover(self, id, notify=True, commit=True):
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError):
time.sleep(0.2)
os.remove(path)
self.conn.execute('UPDATE books SET has_cover=0 WHERE id=?', (id,))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], False, row_is_id=True)
if notify:
self.notify('cover', [id])
def set_cover(self, id, data, notify=True, commit=True):
'''
Set the cover for this book.
`data`: Can be either a QImage, QPixmap, file object or bytestring
'''
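        # Illustrative usage (a sketch, assuming `db` is an open instance of
        # this class): any of these works, since data may be a bytestring, a
        # file object, or an object with a save() method:
        #   db.set_cover(book_id, open('cover.jpg', 'rb'))
        #   db.set_cover(book_id, jpeg_bytes)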
base_path = os.path.join(self.library_path, self.path(id,
index_is_id=True))
if not os.path.exists(base_path):
self.set_path(id, index_is_id=True)
base_path = os.path.join(self.library_path, self.path(id,
index_is_id=True))
self.dirtied([id])
if not os.path.exists(base_path):
os.makedirs(base_path)
path = os.path.join(base_path, 'cover.jpg')
if callable(getattr(data, 'save', None)):
data.save(path)
else:
if callable(getattr(data, 'read', None)):
data = data.read()
try:
save_cover_data_to(data, path)
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=1,last_modified=? WHERE id=?',
(now, id))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if notify:
self.notify('cover', [id])
def has_cover(self, id):
return self.data.get(id, self.FIELD_MAP['cover'], row_is_id=True)
def set_has_cover(self, id, val):
dval = 1 if val else 0
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
(dval, now, id))
self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
if callable(self.book_on_device_func):
return self.book_on_device_func(id)
return None
def book_on_device_string(self, id):
loc = []
count = 0
on = self.book_on_device(id)
if on is not None:
m, a, b, count = on[:4]
if m is not None:
loc.append(_('Main'))
if a is not None:
loc.append(_('Card A'))
if b is not None:
loc.append(_('Card B'))
return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')
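    # Illustrative output of book_on_device_string() (a sketch): a book on
    # the main memory and card A of a device holding 3 copies in total
    # yields 'Main, Card A (3 books)'.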
def set_book_on_device_func(self, func):
self.book_on_device_func = func
def all_formats(self):
formats = self.conn.get('SELECT DISTINCT format from data')
if not formats:
return set([])
return set([f[0] for f in formats])
def format_files(self, index, index_is_id=False):
id = index if index_is_id else self.id(index)
return [(v, k) for k, v in self.format_filename_cache[id].iteritems()]
def formats(self, index, index_is_id=False, verify_formats=True):
        ''' Return available formats as a comma-separated list or None if there are no available formats '''
id_ = index if index_is_id else self.id(index)
formats = self.data.get(id_, self.FIELD_MAP['formats'], row_is_id=True)
if not formats:
return None
if not verify_formats:
return formats
formats = formats.split(',')
ans = []
for fmt in formats:
if self.format_abspath(id_, fmt, index_is_id=True) is not None:
ans.append(fmt)
if not ans:
return None
return ','.join(ans)
def has_format(self, index, format, index_is_id=False):
return self.format_abspath(index, format, index_is_id) is not None
def format_last_modified(self, id_, fmt):
m = self.format_metadata(id_, fmt)
if m:
return m['mtime']
def format_metadata(self, id_, fmt, allow_cache=True, update_db=False,
commit=False):
if not fmt:
return {}
fmt = fmt.upper()
if allow_cache:
x = self.format_metadata_cache[id_].get(fmt, None)
if x is not None:
return x
path = self.format_abspath(id_, fmt, index_is_id=True)
ans = {}
if path is not None:
stat = os.stat(path)
ans['path'] = path
ans['size'] = stat.st_size
ans['mtime'] = utcfromtimestamp(stat.st_mtime)
self.format_metadata_cache[id_][fmt] = ans
if update_db:
self.conn.execute(
'UPDATE data SET uncompressed_size=? WHERE format=? AND'
' book=?', (stat.st_size, fmt, id_))
if commit:
self.conn.commit()
return ans
def format_hash(self, id_, fmt):
path = self.format_abspath(id_, fmt, index_is_id=True)
if path is None:
raise NoSuchFormat('Record %d has no fmt: %s'%(id_, fmt))
sha = hashlib.sha256()
with lopen(path, 'rb') as f:
while True:
raw = f.read(SPOOL_SIZE)
sha.update(raw)
if len(raw) < SPOOL_SIZE:
break
return sha.hexdigest()
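    # Illustrative usage (a sketch, assuming `db` is an open instance):
    #   digest = db.format_hash(book_id, 'EPUB')
    # The file is read in SPOOL_SIZE chunks, so even very large formats are
    # hashed without being loaded fully into memory.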
def format_path(self, index, fmt, index_is_id=False):
'''
This method is intended to be used only in those rare situations, like
Drag'n Drop, when you absolutely need the path to the original file.
Otherwise, use format(..., as_path=True).
Note that a networked backend will always return None.
'''
path = self.format_abspath(index, fmt, index_is_id=index_is_id)
if path is None:
id_ = index if index_is_id else self.id(index)
raise NoSuchFormat('Record %d has no format: %s'%(id_, fmt))
return path
def format_abspath(self, index, format, index_is_id=False):
'''
Return absolute path to the ebook file of format `format`
WARNING: This method will return a dummy path for a network backend DB,
so do not rely on it, use format(..., as_path=True) instead.
Currently used only in calibredb list, the viewer and the catalogs (via
get_data_as_dict()).
Apart from the viewer, I don't believe any of the others do any file
I/O with the results of this call.
'''
id = index if index_is_id else self.id(index)
try:
name = self.format_filename_cache[id][format.upper()]
except:
return None
if name:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
format = ('.' + format.lower()) if format else ''
fmt_path = os.path.join(path, name+format)
if os.path.exists(fmt_path):
return fmt_path
try:
candidates = glob.glob(os.path.join(path, '*'+format))
except: # If path contains strange characters this throws an exc
candidates = []
if format and candidates and os.path.exists(candidates[0]):
try:
shutil.copyfile(candidates[0], fmt_path)
except:
# This can happen if candidates[0] or fmt_path is too long,
# which can happen if the user copied the library from a
# non windows machine to a windows machine.
return None
return fmt_path
def copy_format_to(self, index, fmt, dest, index_is_id=False,
windows_atomic_move=None, use_hardlink=False):
'''
Copy the format ``fmt`` to the file like object ``dest``. If the
specified format does not exist, raises :class:`NoSuchFormat` error.
dest can also be a path, in which case the format is copied to it, iff
the path is different from the current path (taking case sensitivity
into account).
If use_hardlink is True, a hard link will be created instead of the
file being copied. Use with care, because a hard link means that
modifying any one file will cause both files to be modified.
windows_atomic_move is an internally used parameter. You should not use
it in any code outside this module.
'''
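        # Illustrative usage (a sketch): copy an EPUB into an open file
        # object, or to a filesystem path:
        #   with open('/tmp/out.epub', 'wb') as out:
        #       db.copy_format_to(book_id, 'EPUB', out, index_is_id=True)
        #   db.copy_format_to(book_id, 'EPUB', '/tmp/out.epub', index_is_id=True)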
path = self.format_abspath(index, fmt, index_is_id=index_is_id)
if path is None:
id_ = index if index_is_id else self.id(index)
raise NoSuchFormat('Record %d has no %s file'%(id_, fmt))
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if dest:
if samefile(path, dest):
# Ensure that the file has the same case as dest
try:
if path != dest:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
windows_atomic_move.copy_path_to(path, dest)
else:
if hasattr(dest, 'write'):
with lopen(path, 'rb') as f:
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
elif dest:
if samefile(dest, path):
if not self.is_case_sensitive and path != dest:
# Ensure that the file has the same case as dest
try:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
if use_hardlink:
try:
hardlink_file(path, dest)
return
except:
pass
with lopen(path, 'rb') as f, lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
def copy_cover_to(self, index, dest, index_is_id=False,
windows_atomic_move=None, use_hardlink=False):
'''
Copy the cover to the file like object ``dest``. Returns False
if no cover exists or dest is the same file as the current cover.
dest can also be a path in which case the cover is
copied to it iff the path is different from the current path (taking
case sensitivity into account).
If use_hardlink is True, a hard link will be created instead of the
file being copied. Use with care, because a hard link means that
modifying any one file will cause both files to be modified.
windows_atomic_move is an internally used parameter. You should not use
it in any code outside this module.
'''
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if os.access(path, os.R_OK) and dest and not samefile(dest, path):
windows_atomic_move.copy_path_to(path, dest)
return True
else:
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
if hasattr(dest, 'write'):
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
return True
elif dest and not samefile(dest, path):
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
return False
def format(self, index, format, index_is_id=False, as_file=False,
mode='r+b', as_path=False, preserve_filename=False):
'''
Return the ebook format as a bytestring or `None` if the format doesn't exist,
or we don't have permission to write to the ebook file.
:param as_file: If True the ebook format is returned as a file object. Note
that the file object is a SpooledTemporaryFile, so if what you want to
do is copy the format to another file, use :method:`copy_format_to`
instead for performance.
:param as_path: Copies the format file to a temp file and returns the
path to the temp file
:param preserve_filename: If True and returning a path the filename is
the same as that used in the library. Note that using
this means that repeated calls yield the same
temp file (which is re-created each time)
:param mode: This is ignored (present for legacy compatibility)
'''
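        # Illustrative usage (a sketch): get the raw bytes, or a temp file
        # path that the caller is responsible for deleting:
        #   raw = db.format(book_id, 'EPUB', index_is_id=True)
        #   path = db.format(book_id, 'EPUB', index_is_id=True, as_path=True)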
path = self.format_abspath(index, format, index_is_id=index_is_id)
if path is not None:
with lopen(path, mode) as f:
if as_path:
if preserve_filename:
bd = base_dir()
d = os.path.join(bd, 'format_abspath')
try:
os.makedirs(d)
except:
pass
fname = os.path.basename(path)
ret = os.path.join(d, fname)
with lopen(ret, 'wb') as f2:
shutil.copyfileobj(f, f2)
else:
with PersistentTemporaryFile('.'+format.lower()) as pt:
shutil.copyfileobj(f, pt)
ret = pt.name
elif as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
shutil.copyfileobj(f, ret)
ret.seek(0)
# Various bits of code try to use the name as the default
# title when reading metadata, so set it
ret.name = f.name
else:
ret = f.read()
return ret
def add_format_with_hooks(self, index, format, fpath, index_is_id=False,
path=None, notify=True, replace=True):
npath = self.run_import_plugins(fpath, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
stream = lopen(npath, 'rb')
format = check_ebook_format(stream, format)
        book_id = index if index_is_id else self.id(index)
        retval = self.add_format(index, format, stream, replace=replace,
                        index_is_id=index_is_id, path=path, notify=notify)
        run_plugins_on_postimport(self, book_id, format)
        return retval
def add_format(self, index, format, stream, index_is_id=False, path=None,
notify=True, replace=True, copy_function=None):
id = index if index_is_id else self.id(index)
if not format:
format = ''
self.format_metadata_cache[id].pop(format.upper(), None)
name = self.format_filename_cache[id].get(format.upper(), None)
if path is None:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
if name and not replace:
return False
name = self.construct_file_name(id)
ext = ('.' + format.lower()) if format else ''
dest = os.path.join(path, name+ext)
pdir = os.path.dirname(dest)
if not os.path.exists(pdir):
os.makedirs(pdir)
size = 0
if copy_function is not None:
copy_function(dest)
size = os.path.getsize(dest)
else:
if (not getattr(stream, 'name', False) or not samefile(dest,
stream.name)):
with lopen(dest, 'wb') as f:
shutil.copyfileobj(stream, f)
size = f.tell()
elif os.path.exists(dest):
size = os.path.getsize(dest)
self.conn.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
(id, format.upper(), size, name))
self.update_last_modified([id], commit=False)
self.conn.commit()
self.format_filename_cache[id][format.upper()] = name
self.refresh_ids([id])
if notify:
self.notify('metadata', [id])
return True
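    # Illustrative usage (a sketch): add an EPUB from an open stream,
    # replacing any existing EPUB for the book:
    #   with lopen('/path/to/book.epub', 'rb') as stream:
    #       db.add_format(book_id, 'EPUB', stream, index_is_id=True)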
def save_original_format(self, book_id, fmt, notify=True):
fmt = fmt.upper()
if 'ORIGINAL' in fmt:
raise ValueError('Cannot save original of an original fmt')
opath = self.format_abspath(book_id, fmt, index_is_id=True)
if opath is None:
return False
nfmt = 'ORIGINAL_'+fmt
with lopen(opath, 'rb') as f:
return self.add_format(book_id, nfmt, f, index_is_id=True, notify=notify)
    def original_fmt(self, book_id, fmt):
        nfmt = ('ORIGINAL_%s'%fmt).upper()
        opath = self.format_abspath(book_id, nfmt, index_is_id=True)
        return fmt if opath is None else nfmt
def restore_original_format(self, book_id, original_fmt, notify=True):
opath = self.format_abspath(book_id, original_fmt, index_is_id=True)
if opath is not None:
fmt = original_fmt.partition('_')[2]
with lopen(opath, 'rb') as f:
self.add_format(book_id, fmt, f, index_is_id=True, notify=False)
self.remove_format(book_id, original_fmt, index_is_id=True, notify=notify)
def delete_book(self, id, notify=True, commit=True, permanent=False,
do_clean=True):
'''
Removes book from the result cache and the underlying database.
If you set commit to False, you must call clean() manually afterwards
'''
try:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
except:
path = None
if path and os.path.exists(path):
self.rmtree(path, permanent=permanent)
parent = os.path.dirname(path)
if len(os.listdir(parent)) == 0:
self.rmtree(parent, permanent=permanent)
self.conn.execute('DELETE FROM books WHERE id=?', (id,))
if commit:
self.conn.commit()
if do_clean:
self.clean()
self.data.books_deleted([id])
if notify:
self.notify('delete', [id])
def remove_format(self, index, format, index_is_id=False, notify=True,
commit=True, db_only=False):
id = index if index_is_id else self.id(index)
if not format:
format = ''
self.format_metadata_cache[id].pop(format.upper(), None)
name = self.format_filename_cache[id].get(format.upper(), None)
if name:
if not db_only:
try:
path = self.format_abspath(id, format, index_is_id=True)
if path:
delete_file(path)
except:
traceback.print_exc()
self.format_filename_cache[id].pop(format.upper(), None)
self.conn.execute('DELETE FROM data WHERE book=? AND format=?', (id, format.upper()))
if commit:
self.conn.commit()
self.refresh_ids([id])
if notify:
self.notify('metadata', [id])
def clean_standard_field(self, field, commit=False):
# Don't bother with validity checking. Let the exception fly out so
# we can see what happened
def doit(table, ltable_col):
st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
'FROM books WHERE id=book) < 1;')%table
self.conn.execute(st)
st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
'FROM books_%(table)s_link WHERE '
'%(ltable_col)s=%(table)s.id) < 1;') % dict(
table=table, ltable_col=ltable_col)
self.conn.execute(st)
fm = self.field_metadata[field]
doit(fm['table'], fm['link_column'])
if commit:
self.conn.commit()
def clean(self):
'''
Remove orphaned entries.
'''
def doit(ltable, table, ltable_col):
st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
'FROM books WHERE id=book) < 1;')%ltable
self.conn.execute(st)
st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
'FROM books_%(ltable)s_link WHERE '
'%(ltable_col)s=%(table)s.id) < 1;') % dict(
ltable=ltable, table=table, ltable_col=ltable_col)
self.conn.execute(st)
for ltable, table, ltable_col in [
('authors', 'authors', 'author'),
('publishers', 'publishers', 'publisher'),
('tags', 'tags', 'tag'),
('series', 'series', 'series'),
('languages', 'languages', 'lang_code'),
]:
doit(ltable, table, ltable_col)
for id_, tag in self.conn.get('SELECT id, name FROM tags', all=True):
if not tag.strip():
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?',
(id_,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id_,))
self.clean_custom()
self.conn.commit()
def get_books_for_category(self, category, id_):
ans = set([])
if category not in self.field_metadata:
return ans
field = self.field_metadata[category]
if field['datatype'] == 'composite':
dex = field['rec_index']
for book in self.data.iterall():
if field['is_multiple']:
vals = [v.strip() for v in
book[dex].split(field['is_multiple']['cache_to_list'])
if v.strip()]
if id_ in vals:
ans.add(book[0])
elif book[dex] == id_:
ans.add(book[0])
return ans
ans = self.conn.get(
'SELECT book FROM books_{tn}_link WHERE {col}=?'.format(
tn=field['table'], col=field['link_column']), (id_,))
return set(x[0] for x in ans)
########## data structures for get_categories
CATEGORY_SORTS = CATEGORY_SORTS
MATCH_TYPE = ('any', 'all')
class TCat_Tag(object):
def __init__(self, name, sort):
self.n = name
self.s = sort
self.c = 0
self.id_set = set()
self.rt = 0
self.rc = 0
self.id = None
def set_all(self, c, rt, rc, id):
self.c = c
self.rt = rt
self.rc = rc
self.id = id
def __str__(self):
return unicode(self)
def __unicode__(self):
return 'n=%s s=%s c=%d rt=%d rc=%d id=%s'%\
(self.n, self.s, self.c, self.rt, self.rc, self.id)
def clean_user_categories(self):
user_cats = self.prefs.get('user_categories', {})
new_cats = {}
for k in user_cats:
comps = [c.strip() for c in k.split('.') if c.strip()]
if len(comps) == 0:
i = 1
while True:
if unicode(i) not in user_cats:
new_cats[unicode(i)] = user_cats[k]
break
i += 1
else:
new_cats['.'.join(comps)] = user_cats[k]
try:
if new_cats != user_cats:
self.prefs.set('user_categories', new_cats)
except:
pass
return new_cats
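    # Illustrative usage of get_categories() below (a sketch, assuming `db`
    # is an open instance):
    #   cats = db.get_categories(sort='popularity')
    #   for tag in cats['tags'][:5]:
    #       print tag.name, tag.count
    # Each value is a list of Tag objects; keys are category names such as
    # 'authors', 'tags', 'formats' and '@...' user categories.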
def get_categories(self, sort='name', ids=None, icon_map=None):
#start = last = time.clock()
        if icon_map is not None and type(icon_map) != TagsIcons:
            raise TypeError('icon_map passed to get_categories must be of type TagsIcons')
if sort not in self.CATEGORY_SORTS:
raise ValueError('sort ' + sort + ' not a valid value')
self.books_list_filter.change([] if not ids else ids)
id_filter = None if ids is None else frozenset(ids)
tb_cats = self.field_metadata
tcategories = {}
tids = {}
md = []
# First, build the maps. We need a category->items map and an
# item -> (item_id, sort_val) map to use in the books loop
for category in tb_cats.iterkeys():
cat = tb_cats[category]
if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
or category in ['news', 'formats'] or cat.get('is_csp',
False):
continue
# Get the ids for the item values
if not cat['is_custom']:
funcs = {
'authors': self.get_authors_with_ids,
'series': self.get_series_with_ids,
'publisher': self.get_publishers_with_ids,
'tags': self.get_tags_with_ids,
'languages': self.get_languages_with_ids,
'rating': self.get_ratings_with_ids,
}
func = funcs.get(category, None)
if func:
list = func()
else:
raise ValueError(category + ' has no get with ids function')
else:
list = self.get_custom_items_with_ids(label=cat['label'])
tids[category] = {}
if category == 'authors':
for l in list:
(id, val, sort_val) = (l[0], l[1], l[2])
tids[category][val] = (id, sort_val)
elif category == 'languages':
for l in list:
id, val = l[0], calibre_langcode_to_name(l[1])
tids[category][l[1]] = (id, val)
elif cat['datatype'] == 'series':
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, title_sort(val))
elif cat['datatype'] == 'rating':
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, '{0:05.2f}'.format(val))
elif cat['datatype'] == 'text' and cat['is_multiple'] and \
cat['display'].get('is_names', False):
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, author_to_author_sort(val))
else:
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, val)
# add an empty category to the category map
tcategories[category] = {}
# create a list of category/field_index for the books scan to use.
# This saves iterating through field_metadata for each book
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None), False))
for category in tb_cats.iterkeys():
cat = tb_cats[category]
if cat['datatype'] == 'composite' and \
cat['display'].get('make_category', False):
tids[category] = {}
tcategories[category] = {}
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None),
cat['datatype'] == 'composite'))
#print 'end phase "collection":', time.clock() - last, 'seconds'
#last = time.clock()
# Now scan every book looking for category items.
# Code below is duplicated because it shaves off 10% of the loop time
id_dex = self.FIELD_MAP['id']
rating_dex = self.FIELD_MAP['rating']
tag_class = LibraryDatabase2.TCat_Tag
for book in self.data.iterall():
if id_filter is not None and book[id_dex] not in id_filter:
continue
rating = book[rating_dex]
# We kept track of all possible category field_map positions above
for (cat, dex, mult, is_comp) in md:
if not book[dex]:
continue
tid_cat = tids[cat]
tcats_cat = tcategories[cat]
if not mult:
val = book[dex]
if is_comp:
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, val)
tcats_cat[val] = item
item.c += 1
item.id = val
if rating > 0:
item.rt += rating
item.rc += 1
continue
try:
(item_id, sort_val) = tid_cat[val] # let exceptions fly
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, sort_val)
tcats_cat[val] = item
item.c += 1
item.id_set.add(book[0])
item.id = item_id
if rating > 0:
item.rt += rating
item.rc += 1
except:
prints('get_categories: item', val, 'is not in', cat, 'list!')
else:
vals = book[dex].split(mult)
if is_comp:
vals = [v.strip() for v in vals if v.strip()]
for val in vals:
if val not in tid_cat:
tid_cat[val] = (val, val)
for val in vals:
try:
(item_id, sort_val) = tid_cat[val] # let exceptions fly
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, sort_val)
tcats_cat[val] = item
item.c += 1
item.id_set.add(book[0])
item.id = item_id
if rating > 0:
item.rt += rating
item.rc += 1
except:
prints('get_categories: item', val, 'is not in', cat, 'list!')
#print 'end phase "books":', time.clock() - last, 'seconds'
#last = time.clock()
# Now do news
tcategories['news'] = {}
cat = tb_cats['news']
tn = cat['table']
cn = cat['column']
if ids is None:
query = '''SELECT id, {0}, count, avg_rating, sort
FROM tag_browser_{1}'''.format(cn, tn)
else:
query = '''SELECT id, {0}, count, avg_rating, sort
FROM tag_browser_filtered_{1}'''.format(cn, tn)
# results will be sorted later
data = self.conn.get(query)
for r in data:
item = LibraryDatabase2.TCat_Tag(r[1], r[1])
item.set_all(c=r[2], rt=r[2]*r[3], rc=r[2], id=r[0])
tcategories['news'][r[1]] = item
#print 'end phase "news":', time.clock() - last, 'seconds'
#last = time.clock()
# Build the real category list by iterating over the temporary copy
# and building the Tag instances.
categories = {}
tag_class = Tag
for category in tb_cats.iterkeys():
if category not in tcategories:
continue
cat = tb_cats[category]
# prepare the place where we will put the array of Tags
categories[category] = []
# icon_map is not None if get_categories is to store an icon and
# possibly a tooltip in the tag structure.
icon = None
label = tb_cats.key_to_label(category)
if icon_map:
if not tb_cats.is_custom_field(category):
if category in icon_map:
icon = icon_map[label]
else:
icon = icon_map['custom:']
icon_map[category] = icon
datatype = cat['datatype']
avgr = lambda x: 0.0 if x.rc == 0 else x.rt/x.rc
# Duplicate the build of items below to avoid using a lambda func
# in the main Tag loop. Saves a few %
if datatype == 'rating':
formatter = (lambda x:u'\u2605'*int(x/2))
avgr = lambda x: x.n
# eliminate the zero ratings line as well as count == 0
items = [v for v in tcategories[category].values() if v.c > 0 and v.n != 0]
elif category == 'authors':
# Clean up the authors strings to human-readable form
formatter = (lambda x: x.replace('|', ','))
items = [v for v in tcategories[category].values() if v.c > 0]
elif category == 'languages':
# Use a human readable language string
formatter = calibre_langcode_to_name
items = [v for v in tcategories[category].values() if v.c > 0]
else:
formatter = (lambda x:unicode(x))
items = [v for v in tcategories[category].values() if v.c > 0]
# sort the list
if sort == 'name':
kf = lambda x:sort_key(x.s)
reverse=False
elif sort == 'popularity':
kf = lambda x: x.c
reverse=True
else:
kf = avgr
reverse=True
items.sort(key=kf, reverse=reverse)
if tweaks['categories_use_field_for_author_name'] == 'author_sort' and\
(category == 'authors' or
(cat['display'].get('is_names', False) and
cat['is_custom'] and cat['is_multiple'] and
cat['datatype'] == 'text')):
use_sort_as_name = True
else:
use_sort_as_name = False
is_editable = (category not in ['news', 'rating', 'languages'] and
datatype != "composite")
categories[category] = [tag_class(formatter(r.n), count=r.c, id=r.id,
avg=avgr(r), sort=r.s, icon=icon,
category=category,
id_set=r.id_set, is_editable=is_editable,
use_sort_as_name=use_sort_as_name)
for r in items]
#print 'end phase "tags list":', time.clock() - last, 'seconds'
#last = time.clock()
# Needed for legacy databases that have multiple ratings that
# map to n stars
for r in categories['rating']:
r.id_set = None
for x in categories['rating']:
if r.name == x.name and r.id != x.id:
r.count = r.count + x.count
categories['rating'].remove(x)
break
# We delayed computing the standard formats category because it does not
# use a view, but is computed dynamically
categories['formats'] = []
icon = None
if icon_map and 'formats' in icon_map:
icon = icon_map['formats']
for fmt in self.conn.get('SELECT DISTINCT format FROM data'):
fmt = fmt[0]
if ids is not None:
count = self.conn.get('''SELECT COUNT(id)
FROM data
WHERE format=? AND
books_list_filter(book)''', (fmt,),
all=False)
else:
count = self.conn.get('''SELECT COUNT(id)
FROM data
WHERE format=?''', (fmt,),
all=False)
if count > 0:
categories['formats'].append(Tag(fmt, count=count, icon=icon,
category='formats', is_editable=False))
if sort == 'popularity':
categories['formats'].sort(key=lambda x: x.count, reverse=True)
else: # no ratings exist to sort on
# No need for ICU here.
categories['formats'].sort(key=lambda x:x.name)
# Now do identifiers. This works like formats
categories['identifiers'] = []
icon = None
if icon_map and 'identifiers' in icon_map:
icon = icon_map['identifiers']
for ident in self.conn.get('SELECT DISTINCT type FROM identifiers'):
ident = ident[0]
if ids is not None:
count = self.conn.get('''SELECT COUNT(book)
FROM identifiers
WHERE type=? AND
books_list_filter(book)''', (ident,),
all=False)
else:
count = self.conn.get('''SELECT COUNT(id)
FROM identifiers
WHERE type=?''', (ident,),
all=False)
if count > 0:
categories['identifiers'].append(Tag(ident, count=count, icon=icon,
category='identifiers',
is_editable=False))
if sort == 'popularity':
categories['identifiers'].sort(key=lambda x: x.count, reverse=True)
else: # no ratings exist to sort on
# No need for ICU here.
categories['identifiers'].sort(key=lambda x:x.name)
#### Now do the user-defined categories. ####
user_categories = dict.copy(self.clean_user_categories())
# We want to use same node in the user category as in the source
# category. To do that, we need to find the original Tag node. There is
# a time/space tradeoff here. By converting the tags into a map, we can
# do the verification in the category loop much faster, at the cost of
# temporarily duplicating the categories lists.
taglist = {}
for c in categories.keys():
taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), categories[c]))
muc = self.prefs.get('grouped_search_make_user_categories', [])
gst = self.prefs.get('grouped_search_terms', {})
for c in gst:
if c not in muc:
continue
user_categories[c] = []
for sc in gst[c]:
if sc in categories.keys():
for t in categories[sc]:
user_categories[c].append([t.name, sc, 0])
gst_icon = icon_map['gst'] if icon_map else None
for user_cat in sorted(user_categories.keys(), key=sort_key):
items = []
names_seen = {}
for (name,label,ign) in user_categories[user_cat]:
n = icu_lower(name)
if label in taglist and n in taglist[label]:
if user_cat in gst:
# for gst items, make copy and consolidate the tags by name.
if n in names_seen:
t = names_seen[n]
t.id_set |= taglist[label][n].id_set
t.count += taglist[label][n].count
t.tooltip = t.tooltip.replace(')', ', ' + label + ')')
else:
t = copy.copy(taglist[label][n])
t.icon = gst_icon
names_seen[t.name] = t
items.append(t)
else:
items.append(taglist[label][n])
# else: do nothing, to not include nodes w zero counts
cat_name = '@' + user_cat # add the '@' to avoid name collision
# Not a problem if we accumulate entries in the icon map
if icon_map is not None:
icon_map[cat_name] = icon_map['user:']
if sort == 'popularity':
categories[cat_name] = \
sorted(items, key=lambda x: x.count, reverse=True)
elif sort == 'name':
categories[cat_name] = \
sorted(items, key=lambda x: sort_key(x.sort))
else:
categories[cat_name] = \
sorted(items, key=lambda x:x.avg_rating, reverse=True)
#### Finally, the saved searches category ####
items = []
icon = None
if icon_map and 'search' in icon_map:
icon = icon_map['search']
for srch in saved_searches().names():
items.append(Tag(srch, tooltip=saved_searches().lookup(srch),
sort=srch, icon=icon, category='search',
is_editable=False))
        if len(items):
            categories['search'] = items
#print 'last phase ran in:', time.clock() - last, 'seconds'
#print 'get_categories ran in:', time.clock() - start, 'seconds'
return categories
############# End get_categories
def tags_older_than(self, tag, delta, must_have_tag=None,
must_have_authors=None):
'''
        Return the ids of all books having the tag ``tag`` that are older
        than the specified time. Tag comparison is case insensitive.
:param delta: A timedelta object or None. If None, then all ids with
the tag are returned.
:param must_have_tag: If not None the list of matches will be
restricted to books that have this tag
:param must_have_authors: A list of authors. If not None the list of
matches will be restricted to books that have these authors (case
insensitive).
'''
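        # Illustrative usage (a sketch): ids of books tagged 'News' that are
        # at least a week old:
        #   from datetime import timedelta
        #   old_ids = list(db.tags_older_than('News', timedelta(days=7)))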
tag = tag.lower().strip()
mht = must_have_tag.lower().strip() if must_have_tag else None
now = nowf()
tindex = self.FIELD_MAP['timestamp']
gindex = self.FIELD_MAP['tags']
iindex = self.FIELD_MAP['id']
aindex = self.FIELD_MAP['authors']
mah = must_have_authors
if mah is not None:
mah = [x.replace(',', '|').lower() for x in mah]
mah = ','.join(mah)
for r in self.data._data:
if r is not None:
if delta is None or (now - r[tindex]) > delta:
if mah:
authors = r[aindex] or ''
if authors.lower() != mah:
continue
tags = r[gindex]
if tags:
tags = [x.strip() for x in tags.lower().split(',')]
if tag in tags and (mht is None or mht in tags):
yield r[iindex]
def get_next_series_num_for(self, series):
series_id = None
if series:
series_id = self.conn.get('SELECT id from series WHERE name=?',
(series,), all=False)
if series_id is None:
if isinstance(tweaks['series_index_auto_increment'], (int, float)):
return float(tweaks['series_index_auto_increment'])
return 1.0
series_indices = self.conn.get(
('SELECT series_index FROM books WHERE id IN '
'(SELECT book FROM books_series_link where series=?) '
'ORDER BY series_index'),
(series_id,))
return self._get_next_series_num_for_list(series_indices)
def _get_next_series_num_for_list(self, series_indices):
return _get_next_series_num_for_list(series_indices)
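    # Illustrative behaviour (a sketch): with the default tweak settings, if
    # a series currently has indices [1.0, 2.0, 3.0], the next call to
    # get_next_series_num_for() for that series returns 4.0; for an unknown
    # series it returns the tweak's starting value (1.0 by default).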
def set(self, row, column, val, allow_case_change=False):
'''
Convenience method for setting the title, authors, publisher, tags or
rating
'''
id = self.data[row][0]
col = self.FIELD_MAP[column]
books_to_refresh = set()
set_args = (row, col, val)
if column == 'authors':
val = string_to_authors(val)
books_to_refresh |= self.set_authors(id, val, notify=False,
allow_case_change=allow_case_change)
elif column == 'title':
self.set_title(id, val, notify=False)
elif column == 'publisher':
books_to_refresh |= self.set_publisher(id, val, notify=False,
allow_case_change=allow_case_change)
elif column == 'rating':
self.set_rating(id, val, notify=False)
elif column == 'tags':
books_to_refresh |= \
self.set_tags(id, [x.strip() for x in val.split(',') if x.strip()],
append=False, notify=False, allow_case_change=allow_case_change)
self.data.set(*set_args)
self.data.refresh_ids(self, [id])
self.set_path(id, True)
self.notify('metadata', [id])
return books_to_refresh
def set_metadata(self, id, mi, ignore_errors=False, set_title=True,
set_authors=True, commit=True, force_changes=False,
notify=True):
'''
Set metadata for the book `id` from the `Metadata` object `mi`
Setting force_changes=True will force set_metadata to update fields even
if mi contains empty values. In this case, 'None' is distinguished from
'empty'. If mi.XXX is None, the XXX is not replaced, otherwise it is.
        The tags, identifiers, and cover attributes are special cases. Tags and
        identifiers cannot be set to None, so they will always be replaced if
        force_changes is true. You must ensure that mi contains the values you
want the book to have. Covers are always changed if a new cover is
provided, but are never deleted. Also note that force_changes has no
effect on setting title or authors.
'''
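        # Illustrative usage (a sketch): fetch, modify and write back:
        #   mi = db.get_metadata(book_id, index_is_id=True)
        #   mi.tags = ['fiction', 'to-read']
        #   db.set_metadata(book_id, mi)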
if callable(getattr(mi, 'to_book_metadata', None)):
# Handle code passing in a OPF object instead of a Metadata object
mi = mi.to_book_metadata()
def doit(func, *args, **kwargs):
try:
func(*args, **kwargs)
except:
if ignore_errors:
traceback.print_exc()
else:
raise
def should_replace_field(attr):
return (force_changes and (mi.get(attr, None) is not None)) or \
not mi.is_null(attr)
path_changed = False
if set_title and mi.title:
self._set_title(id, mi.title)
path_changed = True
if set_authors:
if not mi.authors:
mi.authors = [_('Unknown')]
authors = []
for a in mi.authors:
authors += string_to_authors(a)
self._set_authors(id, authors)
path_changed = True
if path_changed:
self.set_path(id, index_is_id=True)
if should_replace_field('title_sort'):
self.set_title_sort(id, mi.title_sort, notify=False, commit=False)
if should_replace_field('author_sort'):
doit(self.set_author_sort, id, mi.author_sort, notify=False,
commit=False)
if should_replace_field('publisher'):
doit(self.set_publisher, id, mi.publisher, notify=False,
commit=False)
# Setting rating to zero is acceptable.
if mi.rating is not None:
doit(self.set_rating, id, mi.rating, notify=False, commit=False)
if should_replace_field('series'):
doit(self.set_series, id, mi.series, notify=False, commit=False)
# force_changes has no effect on cover manipulation
if mi.cover_data[1] is not None:
doit(self.set_cover, id, mi.cover_data[1], commit=False)
elif isinstance(mi.cover, basestring) and mi.cover:
if os.access(mi.cover, os.R_OK):
with lopen(mi.cover, 'rb') as f:
raw = f.read()
if raw:
doit(self.set_cover, id, raw, commit=False)
# if force_changes is true, tags are always replaced because the
# attribute cannot be set to None.
if should_replace_field('tags'):
doit(self.set_tags, id, mi.tags, notify=False, commit=False)
if should_replace_field('comments'):
doit(self.set_comment, id, mi.comments, notify=False, commit=False)
if should_replace_field('languages'):
doit(self.set_languages, id, mi.languages, notify=False, commit=False)
# Setting series_index to zero is acceptable
if mi.series_index is not None:
doit(self.set_series_index, id, mi.series_index, notify=False,
commit=False)
if should_replace_field('pubdate'):
doit(self.set_pubdate, id, mi.pubdate, notify=False, commit=False)
if getattr(mi, 'timestamp', None) is not None:
doit(self.set_timestamp, id, mi.timestamp, notify=False,
commit=False)
# identifiers will always be replaced if force_changes is True
mi_idents = mi.get_identifiers()
if force_changes:
self.set_identifiers(id, mi_idents, notify=False, commit=False)
elif mi_idents:
identifiers = self.get_identifiers(id, index_is_id=True)
for key, val in mi_idents.iteritems():
if val and val.strip(): # Don't delete an existing identifier
identifiers[icu_lower(key)] = val
self.set_identifiers(id, identifiers, notify=False, commit=False)
user_mi = mi.get_all_user_metadata(make_copy=False)
for key in user_mi.iterkeys():
if key in self.field_metadata and \
user_mi[key]['datatype'] == self.field_metadata[key]['datatype'] and \
(user_mi[key]['datatype'] != 'text' or
user_mi[key]['is_multiple'] == self.field_metadata[key]['is_multiple']):
val = mi.get(key, None)
if force_changes or val is not None:
doit(self.set_custom, id, val=val, extra=mi.get_extra(key),
label=user_mi[key]['label'], commit=False, notify=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def authors_sort_strings(self, id, index_is_id=False):
'''
Given a book, return the list of author sort strings
for the book's authors
'''
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT sort
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (sort,) in aut_strings:
result.append(sort)
return result
# Given a book, return the map of author sort strings for the book's authors
def authors_with_sort_strings(self, id, index_is_id=False):
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT authors.id, authors.name, authors.sort, authors.link
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (id_, author, sort, link) in aut_strings:
result.append((id_, author.replace('|', ','), sort, link))
return result
# Given a book, return the author_sort string for authors of the book
def author_sort_from_book(self, id, index_is_id=False):
auts = self.authors_sort_strings(id, index_is_id)
return ' & '.join(auts).replace('|', ',')
# Given an author, return a list of books with that author
def books_for_author(self, id_, index_is_id=False):
id_ = id_ if index_is_id else self.id(id_)
books = self.conn.get('''
SELECT bl.book
FROM books_authors_link as bl
WHERE bl.author=?''', (id_,))
return [b[0] for b in books]
# Given a list of authors, return the author_sort string for the authors,
# preferring the author sort associated with the author over the computed
# string
def author_sort_from_authors(self, authors):
result = []
for aut in authors:
r = self.conn.get('SELECT sort FROM authors WHERE name=?',
(aut.replace(',', '|'),), all=False)
if r is None:
result.append(author_to_author_sort(aut))
else:
result.append(r)
return ' & '.join(result).replace('|', ',')
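    # Illustrative behaviour (a sketch, with the default author sort tweak):
    # for authors ['John Doe', 'Ann Lee'] with no stored sort values this
    # returns the computed 'Doe, John & Lee, Ann'; a sort value stored in the
    # authors table always wins over the computed one.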
def _update_author_in_cache(self, id_, ss, final_authors):
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (ss, id_))
self.data.set(id_, self.FIELD_MAP['authors'],
','.join([a.replace(',', '|') for a in final_authors]),
row_is_id=True)
self.data.set(id_, self.FIELD_MAP['author_sort'], ss, row_is_id=True)
aum = self.authors_with_sort_strings(id_, index_is_id=True)
self.data.set(id_, self.FIELD_MAP['au_map'],
':#:'.join([':::'.join((au.replace(',', '|'), aus, aul))
for (_, au, aus, aul) in aum]),
row_is_id=True)
def _set_authors(self, id, authors, allow_case_change=False):
if not authors:
authors = [_('Unknown')]
self.conn.execute('DELETE FROM books_authors_link WHERE book=?',(id,))
books_to_refresh = set([])
final_authors = []
for a in authors:
case_change = False
if not a:
continue
a = a.strip().replace(',', '|')
if not isinstance(a, unicode):
a = a.decode(preferred_encoding, 'replace')
aus = self.conn.get('SELECT id, name, sort FROM authors WHERE name=?', (a,))
if aus:
aid, name, sort = aus[0]
# Handle change of case
if name != a:
if allow_case_change:
ns = author_to_author_sort(a.replace('|', ','))
if strcmp(sort, ns) == 0:
sort = ns
self.conn.execute('''UPDATE authors SET name=?, sort=?
WHERE id=?''', (a, sort, aid))
case_change = True
else:
a = name
else:
aid = self.conn.execute('''INSERT INTO authors(name)
VALUES (?)''', (a,)).lastrowid
final_authors.append(a.replace('|', ','))
try:
self.conn.execute('''INSERT INTO books_authors_link(book, author)
VALUES (?,?)''', (id, aid))
except IntegrityError: # Sometimes books specify the same author twice in their metadata
pass
if case_change:
bks = self.conn.get('''SELECT book FROM books_authors_link
WHERE author=?''', (aid,))
books_to_refresh |= set([bk[0] for bk in bks])
for bk in books_to_refresh:
ss = self.author_sort_from_book(id, index_is_id=True)
aus = self.author_sort(bk, index_is_id=True)
if strcmp(aus, ss) == 0:
self._update_author_in_cache(bk, ss, final_authors)
# This can repeat what was done above in rare cases. Let it.
ss = self.author_sort_from_book(id, index_is_id=True)
self._update_author_in_cache(id, ss, final_authors)
self.clean_standard_field('authors', commit=True)
return books_to_refresh
def windows_check_if_files_in_use(self, book_id):
'''
Raises an EACCES IOError if any of the files in the folder of book_id
are opened in another program on windows.
'''
if iswindows:
path = self.path(book_id, index_is_id=True)
if path:
spath = os.path.join(self.library_path, *path.split('/'))
wam = None
if os.path.exists(spath):
try:
wam = WindowsAtomicFolderMove(spath)
finally:
if wam is not None:
wam.close_handles()
def set_authors(self, id, authors, notify=True, commit=True,
allow_case_change=False):
'''
        Note that even if commit is False, the db will still be committed,
        because setting the authors changes the location of the book's files.
        :param authors: A list of authors.
'''
self.windows_check_if_files_in_use(id)
books_to_refresh = self._set_authors(id, authors,
allow_case_change=allow_case_change)
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.set_path(id, index_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_title_sort(self, id, title_sort_, notify=True, commit=True):
if not title_sort_:
return False
if isbytestring(title_sort_):
title_sort_ = title_sort_.decode(preferred_encoding, 'replace')
self.conn.execute('UPDATE books SET sort=? WHERE id=?', (title_sort_, id))
self.data.set(id, self.FIELD_MAP['sort'], title_sort_, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
return True
def _set_title(self, id, title):
if not title:
return False
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
old_title = self.title(id, index_is_id=True)
# We cannot check if old_title == title as previous code might have
# already updated the cache
only_case_change = icu_lower(old_title) == icu_lower(title)
self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
self.data.set(id, self.FIELD_MAP['title'], title, row_is_id=True)
if only_case_change:
# SQLite update trigger will not update sort on a case change
self.conn.execute('UPDATE books SET sort=? WHERE id=?',
(title_sort(title), id))
ts = self.conn.get('SELECT sort FROM books WHERE id=?', (id,),
all=False)
if ts:
self.data.set(id, self.FIELD_MAP['sort'], ts, row_is_id=True)
return True
def set_title(self, id, title, notify=True, commit=True):
'''
        Note that even if commit is False, the db will still be committed,
        because setting the title changes the location of the book's files.
'''
self.windows_check_if_files_in_use(id)
if not self._set_title(id, title):
return
self.set_path(id, index_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_languages(self, book_id, languages, notify=True, commit=True):
self.conn.execute(
'DELETE FROM books_languages_link WHERE book=?', (book_id,))
self.conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
FROM books_languages_link WHERE
books_languages_link.lang_code=languages.id) < 1''')
books_to_refresh = set([book_id])
final_languages = []
for l in languages:
lc = canonicalize_lang(l)
if not lc or lc in final_languages or lc in ('und', 'zxx', 'mis',
'mul'):
continue
final_languages.append(lc)
lc_id = self.conn.get('SELECT id FROM languages WHERE lang_code=?',
(lc,), all=False)
if lc_id is None:
lc_id = self.conn.execute('''INSERT INTO languages(lang_code)
VALUES (?)''', (lc,)).lastrowid
self.conn.execute('''INSERT INTO books_languages_link(book, lang_code)
VALUES (?,?)''', (book_id, lc_id))
self.dirtied(books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.data.set(book_id, self.FIELD_MAP['languages'],
u','.join(final_languages), row_is_id=True)
if notify:
self.notify('metadata', [book_id])
return books_to_refresh
def set_timestamp(self, id, dt, notify=True, commit=True):
if dt:
if isinstance(dt, (unicode, bytes)):
dt = parse_date(dt, as_utc=True, assume_utc=False)
self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_pubdate(self, id, dt, notify=True, commit=True):
if not dt:
dt = UNDEFINED_DATE
if isinstance(dt, basestring):
dt = parse_only_date(dt)
self.conn.execute('UPDATE books SET pubdate=? WHERE id=?', (dt, id))
self.data.set(id, self.FIELD_MAP['pubdate'], dt, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_publisher(self, id, publisher, notify=True, commit=True,
allow_case_change=False):
self.conn.execute('DELETE FROM books_publishers_link WHERE book=?',(id,))
self.conn.execute('''DELETE FROM publishers WHERE (SELECT COUNT(id)
FROM books_publishers_link
WHERE publisher=publishers.id) < 1''')
books_to_refresh = set([])
if publisher:
case_change = False
if not isinstance(publisher, unicode):
publisher = publisher.decode(preferred_encoding, 'replace')
pubx = self.conn.get('''SELECT id,name from publishers
WHERE name=?''', (publisher,))
if pubx:
aid, cur_name = pubx[0]
if publisher != cur_name:
if allow_case_change:
self.conn.execute('''UPDATE publishers SET name=?
WHERE id=?''', (publisher, aid))
case_change = True
else:
publisher = cur_name
else:
aid = self.conn.execute('''INSERT INTO publishers(name)
VALUES (?)''', (publisher,)).lastrowid
self.conn.execute('''INSERT INTO books_publishers_link(book, publisher)
VALUES (?,?)''', (id, aid))
if case_change:
bks = self.conn.get('''SELECT book FROM books_publishers_link
WHERE publisher=?''', (aid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_uuid(self, id, uuid, notify=True, commit=True):
if uuid:
self.conn.execute('UPDATE books SET uuid=? WHERE id=?', (uuid, id))
self.data.set(id, self.FIELD_MAP['uuid'], uuid, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def get_id_from_uuid(self, uuid):
if uuid:
return (self.data._uuid_map.get(uuid, None) or
self.conn.get('SELECT id FROM books WHERE uuid=?', (uuid,),
all=False))
# Convenience methods for tags_list_editor
# Note: we generally do not need to refresh_ids because library_view will
# refresh everything.
def get_ratings_with_ids(self):
result = self.conn.get('SELECT id,rating FROM ratings')
if not result:
return []
return result
def dirty_books_referencing(self, field, id, commit=True):
# Get the list of books to dirty -- all books that reference the item
table = self.field_metadata[field]['table']
link = self.field_metadata[field]['link_column']
bks = self.conn.get(
'SELECT book from books_{0}_link WHERE {1}=?'.format(table, link),
(id,))
books = []
for (book_id,) in bks:
books.append(book_id)
self.dirtied(books, commit=commit)
def get_tags_with_ids(self):
result = self.conn.get('SELECT id,name FROM tags')
if not result:
return []
return result
def get_languages_with_ids(self):
result = self.conn.get('SELECT id,lang_code FROM languages')
if not result:
return []
return result
def rename_tag(self, old_id, new_name):
# It is possible that new_name is in fact a set of names. Split it on
# comma to find out. If it is, then rename the first one and append the
# rest
new_names = [t.strip() for t in new_name.strip().split(',') if t.strip()]
new_name = new_names[0]
new_names = new_names[1:]
# get the list of books that reference the tag being changed
books = self.conn.get('''SELECT book from books_tags_link
WHERE tag=?''', (old_id,))
books = [b[0] for b in books]
new_id = self.conn.get(
'''SELECT id from tags
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
# easy cases. Simply rename the tag. Do it even if equal, in case
# there is a change of case
self.conn.execute('''UPDATE tags SET name=?
WHERE id=?''', (new_name, old_id))
new_id = old_id
else:
# It is possible that by renaming a tag, the tag will appear
# twice on a book. This will throw an integrity error, aborting
# all the changes. To get around this, we first delete any links
# to the new_id from books referencing the old_id, so that
# renaming old_id to new_id will be unique on the book
for book_id in books:
self.conn.execute('''DELETE FROM books_tags_link
WHERE book=? and tag=?''', (book_id, new_id))
# Change the link table to point at the new tag
self.conn.execute('''UPDATE books_tags_link SET tag=?
WHERE tag=?''',(new_id, old_id,))
            # Get rid of the no-longer used tag
self.conn.execute('DELETE FROM tags WHERE id=?', (old_id,))
if new_names:
# have some left-over names to process. Add them to the book.
for book_id in books:
self.set_tags(book_id, new_names, append=True, notify=False,
commit=False)
self.dirtied(books, commit=False)
self.clean_standard_field('tags', commit=False)
self.conn.commit()
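    # Illustrative behaviour of rename_tag() (a sketch): renaming tag id 7 to
    # 'scifi, classics' renames the tag to 'scifi' and additionally appends a
    # 'classics' tag to every book that carried the old tag.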
def delete_tag_using_id(self, id):
self.dirty_books_referencing('tags', id, commit=False)
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
def get_series_with_ids(self):
result = self.conn.get('SELECT id,name FROM series')
if not result:
return []
return result
def rename_series(self, old_id, new_name, change_index=True):
new_name = new_name.strip()
new_id = self.conn.get(
'''SELECT id from series
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
new_id = old_id
self.conn.execute('UPDATE series SET name=? WHERE id=?',
(new_name, old_id))
else:
# New series exists. Must update the link, then assign a
# new series index to each of the books.
if change_index:
# Get the list of books where we must update the series index
books = self.conn.get('''SELECT books.id
FROM books, books_series_link as lt
WHERE books.id = lt.book AND lt.series=?
ORDER BY books.series_index''', (old_id,))
# Now update the link table
self.conn.execute('''UPDATE books_series_link
SET series=?
WHERE series=?''',(new_id, old_id,))
if change_index and tweaks['series_index_auto_increment'] != 'no_change':
# Now set the indices
for (book_id,) in books:
# Get the next series index
index = self.get_next_series_num_for(new_name)
self.conn.execute('''UPDATE books
SET series_index=?
WHERE id=?''',(index, book_id,))
self.dirty_books_referencing('series', new_id, commit=False)
self.clean_standard_field('series', commit=False)
self.conn.commit()
def delete_series_using_id(self, id):
self.dirty_books_referencing('series', id, commit=False)
books = self.conn.get('SELECT book from books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM series WHERE id=?', (id,))
for (book_id,) in books:
self.conn.execute('UPDATE books SET series_index=1.0 WHERE id=?', (book_id,))
self.conn.commit()
def get_publishers_with_ids(self):
result = self.conn.get('SELECT id,name FROM publishers')
if not result:
return []
return result
def rename_publisher(self, old_id, new_name):
new_name = new_name.strip()
new_id = self.conn.get(
'''SELECT id from publishers
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
new_id = old_id
# New name doesn't exist. Simply change the old name
self.conn.execute('UPDATE publishers SET name=? WHERE id=?',
(new_name, old_id))
else:
# Change the link table to point at the new one
self.conn.execute('''UPDATE books_publishers_link
SET publisher=?
WHERE publisher=?''',(new_id, old_id,))
# Get rid of the no-longer used publisher
self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
self.dirty_books_referencing('publisher', new_id, commit=False)
self.clean_standard_field('publisher', commit=False)
self.conn.commit()
def delete_publisher_using_id(self, old_id):
self.dirty_books_referencing('publisher', old_id, commit=False)
self.conn.execute('''DELETE FROM books_publishers_link
WHERE publisher=?''', (old_id,))
self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
self.conn.commit()
def get_authors_with_ids(self):
result = self.conn.get('SELECT id,name,sort,link FROM authors')
if not result:
return []
return result
def get_author_id(self, author):
author = author.replace(',', '|')
result = self.conn.get('SELECT id FROM authors WHERE name=?',
(author,), all=False)
return result
def set_link_field_for_author(self, aid, link, commit=True, notify=False):
if not link:
link = ''
self.conn.execute('UPDATE authors SET link=? WHERE id=?', (link.strip(), aid))
if commit:
self.conn.commit()
def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False):
self.conn.execute('UPDATE authors SET sort=? WHERE id=?',
(new_sort.strip(), old_id))
if commit:
self.conn.commit()
# Now change all the author_sort fields in books by this author
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
for (book_id,) in bks:
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss, notify=notify, commit=commit)
def rename_author(self, old_id, new_name):
# Make sure that any commas in new_name are changed to '|'!
new_name = new_name.replace(',', '|').strip()
if not new_name:
new_name = _('Unknown')
# Get the list of books we must fix up, one way or the other
# Save the list so we can use it twice
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
books = []
for (book_id,) in bks:
books.append(book_id)
# check if the new author already exists
new_id = self.conn.get('SELECT id from authors WHERE name=?',
(new_name,), all=False)
if new_id is None or old_id == new_id:
# No name clash. Go ahead and update the author's name
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
else:
# First check for the degenerate case -- changing a value to itself.
# Update it in case there is a change of case, but do nothing else
if old_id == new_id:
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
self.conn.commit()
return new_id
# Author exists. To fix this, we must replace all the authors
# instead of replacing the one. Reason: db integrity checks can stop
# the rename process, which would leave everything half-done. We
# can't do it the same way as tags (delete and add) because author
# order is important.
for book_id in books:
# Get the existing list of authors
authors = self.conn.get('''
SELECT author from books_authors_link
WHERE book=?
ORDER BY id''',(book_id,))
# unpack the double-list structure, replacing the old author
# with the new one while we are at it
for i,aut in enumerate(authors):
authors[i] = aut[0] if aut[0] != old_id else new_id
# Delete the existing authors list
self.conn.execute('''DELETE FROM books_authors_link
WHERE book=?''',(book_id,))
# Change the authors to the new list
for aid in authors:
try:
self.conn.execute('''
INSERT INTO books_authors_link(book, author)
VALUES (?,?)''', (book_id, aid))
except IntegrityError:
# Sometimes books specify the same author twice in their
# metadata. Ignore it.
pass
# Now delete the old author from the DB
self.conn.execute('DELETE FROM authors WHERE id=?', (old_id,))
self.dirtied(books, commit=False)
self.conn.commit()
# the authors are now changed, either by changing the author's name
# or replacing the author in the list. Now must fix up the books.
for book_id in books:
# First, must refresh the cache to see the new authors
self.data.refresh_ids(self, [book_id])
# now fix the filesystem paths
self.set_path(book_id, index_is_id=True)
# Next fix the author sort. Reset it to the default
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss)
# the caller will do a general refresh, so we don't need to
# do one here
return new_id
# end convenience methods
def get_tags(self, id):
result = self.conn.get(
'SELECT name FROM tags WHERE id IN (SELECT tag FROM books_tags_link WHERE book=?)',
(id,), all=True)
if not result:
return set([])
return set([r[0] for r in result])
@classmethod
def cleanup_tags(cls, tags):
tags = [x.strip().replace(',', ';') for x in tags if x.strip()]
tags = [x.decode(preferred_encoding, 'replace')
if isbytestring(x) else x for x in tags]
tags = [u' '.join(x.split()) for x in tags]
ans, seen = [], set([])
for tag in tags:
if tag.lower() not in seen:
seen.add(tag.lower())
ans.append(tag)
return ans
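# Illustrative sketch of the normalization performed above (example values
# are assumptions):
#
#   db.cleanup_tags(['Fantasy', ' fantasy ', 'Hard, SF', 'Space   Opera'])
#   # -> [u'Fantasy', u'Hard; SF', u'Space Opera']
#
# Commas become semicolons, runs of whitespace collapse to single spaces,
# and duplicates are dropped case-insensitively, keeping the first spelling
# seen.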
def remove_all_tags(self, ids, notify=False, commit=True):
self.conn.executemany(
'DELETE FROM books_tags_link WHERE book=?', [(x,) for x in ids])
self.dirtied(ids, commit=False)
if commit:
self.conn.commit()
for x in ids:
self.data.set(x, self.FIELD_MAP['tags'], '', row_is_id=True)
if notify:
self.notify('metadata', ids)
def bulk_modify_tags(self, ids, add=[], remove=[], notify=False):
add = self.cleanup_tags(add)
remove = self.cleanup_tags(remove)
remove = set(remove) - set(add)
if not ids or (not add and not remove):
return
# Add tags that do not already exist into the tag table
all_tags = self.all_tags()
lt = [t.lower() for t in all_tags]
new_tags = [t for t in add if t.lower() not in lt]
if new_tags:
self.conn.executemany('INSERT INTO tags(name) VALUES (?)', [(x,) for x in
new_tags])
# Create the temporary tables to store the ids for books and tags
# to be operated on
tables = ('temp_bulk_tag_edit_books', 'temp_bulk_tag_edit_add',
'temp_bulk_tag_edit_remove')
drops = '\n'.join(['DROP TABLE IF EXISTS %s;'%t for t in tables])
creates = '\n'.join(['CREATE TEMP TABLE %s(id INTEGER PRIMARY KEY);'%t
for t in tables])
self.conn.executescript(drops + creates)
# Populate the books temp table
self.conn.executemany(
'INSERT INTO temp_bulk_tag_edit_books VALUES (?)',
[(x,) for x in ids])
# Populate the add/remove tags temp tables
for table, tags in enumerate([add, remove]):
if not tags:
continue
table = tables[table+1]
insert = ('INSERT INTO %s(id) SELECT tags.id FROM tags WHERE name=?'
' COLLATE PYNOCASE LIMIT 1')
self.conn.executemany(insert%table, [(x,) for x in tags])
if remove:
self.conn.execute(
'''DELETE FROM books_tags_link WHERE
book IN (SELECT id FROM %s) AND
tag IN (SELECT id FROM %s)'''
% (tables[0], tables[2]))
if add:
self.conn.execute(
'''
INSERT OR REPLACE INTO books_tags_link(book, tag) SELECT {0}.id, {1}.id FROM
{0}, {1}
'''.format(tables[0], tables[1])
)
self.conn.executescript(drops)
self.dirtied(ids, commit=False)
self.conn.commit()
for x in ids:
tags = u','.join(self.get_tags(x))
self.data.set(x, self.FIELD_MAP['tags'], tags, row_is_id=True)<|fim▁hole|> self.notify('metadata', ids)
def commit(self):
self.conn.commit()
def set_tags(self, id, tags, append=False, notify=True, commit=True,
allow_case_change=False):
'''
@param tags: list of strings
@param append: If True, existing tags are not removed
'''
if not tags:
tags = []
if not append:
self.conn.execute('DELETE FROM books_tags_link WHERE book=?', (id,))
self.conn.execute('''DELETE FROM tags WHERE (SELECT COUNT(id)
FROM books_tags_link WHERE tag=tags.id) < 1''')
otags = self.get_tags(id)
tags = self.cleanup_tags(tags)
books_to_refresh = set([])
for tag in (set(tags)-otags):
case_changed = False
tag = tag.strip()
if not tag:
continue
if not isinstance(tag, unicode):
tag = tag.decode(preferred_encoding, 'replace')
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
idx = lt.index(tag.lower())
except ValueError:
idx = -1
if idx > -1:
etag = existing_tags[idx]
tid = self.conn.get('SELECT id FROM tags WHERE name=?', (etag,), all=False)
if allow_case_change and etag != tag:
self.conn.execute('UPDATE tags SET name=? WHERE id=?', (tag, tid))
case_changed = True
else:
tid = self.conn.execute('INSERT INTO tags(name) VALUES(?)', (tag,)).lastrowid
if not self.conn.get('''SELECT book FROM books_tags_link
WHERE book=? AND tag=?''', (id, tid), all=False):
self.conn.execute('''INSERT INTO books_tags_link(book, tag)
VALUES (?,?)''', (id, tid))
if case_changed:
bks = self.conn.get('SELECT book FROM books_tags_link WHERE tag=?',
(tid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
tags = u','.join(self.get_tags(id))
self.data.set(id, self.FIELD_MAP['tags'], tags, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def unapply_tags(self, book_id, tags, notify=True):
for tag in tags:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (tag,), all=False)
if id:
self.conn.execute('''DELETE FROM books_tags_link
WHERE tag=? AND book=?''', (id, book_id))
self.conn.commit()
self.data.refresh_ids(self, [book_id])
if notify:
self.notify('metadata', [book_id])
def is_tag_used(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
lt.index(tag.lower())
return True
except ValueError:
return False
def delete_tag(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
idx = lt.index(tag.lower())
except ValueError:
idx = -1
if idx > -1:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (existing_tags[idx],), all=False)
if id:
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
def _get_series_values(self, val):
return _get_series_values(val)
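# Illustrative note (not in the original file): series_index_pat splits a
# trailing bracketed index off a series string, e.g.
#
#   m = self.series_index_pat.match('Dune [2.5]')
#   # m.group(1) == 'Dune', m.group(2) == '2.5'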
def set_series(self, id, series, notify=True, commit=True, allow_case_change=True):
self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
self.conn.execute('''DELETE FROM series
WHERE (SELECT COUNT(id) FROM books_series_link
WHERE series=series.id) < 1''')
(series, idx) = self._get_series_values(series)
books_to_refresh = set([])
if series:
case_change = False
if not isinstance(series, unicode):
series = series.decode(preferred_encoding, 'replace')
series = series.strip()
series = u' '.join(series.split())
sx = self.conn.get('SELECT id,name from series WHERE name=?', (series,))
if sx:
aid, cur_name = sx[0]
if cur_name != series:
if allow_case_change:
self.conn.execute('UPDATE series SET name=? WHERE id=?', (series, aid))
case_change = True
else:
series = cur_name
else:
aid = self.conn.execute('INSERT INTO series(name) VALUES (?)', (series,)).lastrowid
self.conn.execute('INSERT INTO books_series_link(book, series) VALUES (?,?)', (id, aid))
if idx:
self.set_series_index(id, idx, notify=notify, commit=commit)
if case_change:
bks = self.conn.get('SELECT book FROM books_series_link WHERE series=?',
(aid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['series'], series, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_series_index(self, id, idx, notify=True, commit=True):
if idx is None:
idx = 1.0
try:
idx = float(idx)
except:
idx = 1.0
self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (idx, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['series_index'], idx, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_rating(self, id, rating, notify=True, commit=True):
if not rating:
rating = 0
rating = int(rating)
self.conn.execute('DELETE FROM books_ratings_link WHERE book=?',(id,))
rat = self.conn.get('SELECT id FROM ratings WHERE rating=?', (rating,), all=False)
rat = rat if rat is not None else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['rating'], rating, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_comment(self, id, text, notify=True, commit=True):
self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
if text:
self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
else:
text = ''
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
self.dirtied([id], commit=False)
if notify:
self.notify('metadata', [id])
def set_author_sort(self, id, sort, notify=True, commit=True):
if not sort:
sort = ''
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['author_sort'], sort, row_is_id=True)
if notify:
self.notify('metadata', [id])
def isbn(self, idx, index_is_id=False):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
if x.startswith('isbn:'):
return x[5:].strip()
def get_identifiers(self, idx, index_is_id=False):
ans = {}
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
key, _, val = x.partition(':')
key, val = key.strip(), val.strip()
if key and val:
ans[key] = val
return ans
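# Illustrative note, consistent with the parsing above: the identifiers
# field is stored as a comma-separated list of type:value pairs, e.g.
#
#   raw = 'isbn:9780441013593,doi:10.1000/182'
#   # get_identifiers(...) -> {'isbn': '9780441013593',
#   #                          'doi': '10.1000/182'}
#   # isbn(...)            -> '9780441013593'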
def get_all_identifier_types(self):
idents = self.conn.get('SELECT DISTINCT type FROM identifiers')
return [ident[0] for ident in idents]
def _clean_identifier(self, typ, val):
typ = icu_lower(typ).strip().replace(':', '').replace(',', '')
val = val.strip().replace(',', '|').replace(':', '|')
return typ, val
def set_identifier(self, id_, typ, val, notify=True, commit=True):
'If val is empty, deletes identifier of type typ'
typ, val = self._clean_identifier(typ, val)
identifiers = self.get_identifiers(id_, index_is_id=True)
if not typ:
return
changed = False
if not val and typ in identifiers:
identifiers.pop(typ)
changed = True
self.conn.execute(
'DELETE from identifiers WHERE book=? AND type=?',
(id_, typ))
if val and identifiers.get(typ, None) != val:
changed = True
identifiers[typ] = val
self.conn.execute(
'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
(id_, typ, val))
if changed:
raw = ','.join(['%s:%s'%(k, v) for k, v in
identifiers.iteritems()])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id_])
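# Hedged usage sketch (not part of the original file):
#
#   db.set_identifier(book_id, 'doi', '10.1000/182')  # add or replace
#   db.set_identifier(book_id, 'doi', '')             # delete the identifier
#
# Note that _clean_identifier() maps ':' and ',' in values to '|', since
# both characters are reserved by the serialized type:value,... format.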
def set_identifiers(self, id_, identifiers, notify=True, commit=True):
cleaned = {}
if not identifiers:
identifiers = {}
for typ, val in identifiers.iteritems():
typ, val = self._clean_identifier(typ, val)
if val:
cleaned[typ] = val
self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
self.conn.executemany(
'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
[(id_, k, v) for k, v in cleaned.iteritems()])
raw = ','.join(['%s:%s'%(k, v) for k, v in
cleaned.iteritems()])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id_])
def set_isbn(self, id_, isbn, notify=True, commit=True):
self.set_identifier(id_, 'isbn', isbn, notify=notify, commit=commit)
def add_catalog(self, path, title):
from calibre.ebooks.metadata.meta import get_metadata
format = os.path.splitext(path)[1][1:].lower()
with lopen(path, 'rb') as stream:
matches = self.data.get_matches('title', '='+title)
if matches:
tag_matches = self.data.get_matches('tags', '='+_('Catalog'))
matches = matches.intersection(tag_matches)
db_id = None
if matches:
db_id = list(matches)[0]
if db_id is None:
obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
(title, 'calibre'))
db_id = obj.lastrowid
self.data.books_added([db_id], self)
self.set_path(db_id, index_is_id=True)
self.conn.commit()
try:
mi = get_metadata(stream, format)
except:
mi = Metadata(title, ['calibre'])
stream.seek(0)
mi.title, mi.authors = title, ['calibre']
mi.tags = [_('Catalog')]
mi.pubdate = mi.timestamp = utcnow()
if format == 'mobi':
mi.cover, mi.cover_data = None, (None, None)
self.set_metadata(db_id, mi)
self.add_format(db_id, format, stream, index_is_id=True)
self.conn.commit()
self.data.refresh_ids(self, [db_id]) # Needed to update format list and size
return db_id
def add_news(self, path, arg):
from calibre.ebooks.metadata.meta import get_metadata
format = os.path.splitext(path)[1][1:].lower()
stream = path if hasattr(path, 'read') else lopen(path, 'rb')
stream.seek(0)
mi = get_metadata(stream, format, use_libprs_metadata=False,
force_read_metadata=True)
# Force the author to 'calibre', since the automatic deletion of old news
# checks for both author == 'calibre' and the tag 'News'
mi.authors = ['calibre']
stream.seek(0)
if mi.series_index is None:
mi.series_index = self.get_next_series_num_for(mi.series)
mi.tags = [_('News')]
if arg['add_title_tag']:
mi.tags += [arg['title']]
if arg['custom_tags']:
mi.tags += arg['custom_tags']
obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
(mi.title, mi.authors[0]))
id = obj.lastrowid
self.data.books_added([id], self)
self.set_path(id, index_is_id=True)
self.conn.commit()
if mi.pubdate is None:
mi.pubdate = utcnow()
if mi.timestamp is None:
mi.timestamp = utcnow()
self.set_metadata(id, mi)
self.add_format(id, format, stream, index_is_id=True)
if not hasattr(path, 'read'):
stream.close()
self.conn.commit()
self.data.refresh_ids(self, [id]) # Needed to update format list and size
return id
def run_import_plugins(self, path_or_stream, format):
format = format.lower()
if hasattr(path_or_stream, 'seek'):
path_or_stream.seek(0)
pt = PersistentTemporaryFile('_import_plugin.'+format)
shutil.copyfileobj(path_or_stream, pt, 1024**2)
pt.close()
path = pt.name
else:
path = path_or_stream
return run_plugins_on_import(path, format)
def _add_newbook_tag(self, mi):
tags = prefs['new_book_tags']
if tags:
for tag in [t.strip() for t in tags]:
if tag:
if mi.tags is None:
mi.tags = [tag]
else:
mi.tags.append(tag)
def create_book_entry(self, mi, cover=None, add_duplicates=True,
force_id=None):
if mi.tags:
mi.tags = list(mi.tags)
self._add_newbook_tag(mi)
if not add_duplicates and self.has_book(mi):
return None
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
title = mi.title
if isbytestring(aus):
aus = aus.decode(preferred_encoding, 'replace')
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
if force_id is None:
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
else:
id = force_id
obj = self.conn.execute(
'INSERT INTO books(id, title, series_index, '
'author_sort) VALUES (?, ?, ?, ?)',
(id, title, series_index, aus))
self.data.books_added([id], self)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, ignore_errors=True, commit=True)
if cover is not None:
try:
self.set_cover(id, cover)
except:
traceback.print_exc()
return id
def add_books(self, paths, formats, metadata, add_duplicates=True,
return_ids=False):
'''
Add books to the database. The result cache is not updated.
:param paths: List of paths to book files or file-like objects
'''
formats, metadata = iter(formats), iter(metadata)
duplicates = []
ids = []
postimport = []
for path in paths:
mi = metadata.next()
self._add_newbook_tag(mi)
format = formats.next()
if not add_duplicates and self.has_book(mi):
duplicates.append((path, format, mi))
continue
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
title = mi.title
if isinstance(aus, str):
aus = aus.decode(preferred_encoding, 'replace')
if isinstance(title, str):
title = title.decode(preferred_encoding, 'replace')
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
self.data.books_added([id], self)
ids.append(id)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, commit=True, ignore_errors=True)
npath = self.run_import_plugins(path, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
stream = lopen(npath, 'rb')
format = check_ebook_format(stream, format)
self.add_format(id, format, stream, index_is_id=True)
stream.close()
postimport.append((id, format))
self.conn.commit()
self.data.refresh_ids(self, ids) # Needed to update format list and size
for book_id, fmt in postimport:
run_plugins_on_postimport(self, book_id, fmt)
if duplicates:
paths = list(duplicate[0] for duplicate in duplicates)
formats = list(duplicate[1] for duplicate in duplicates)
metadata = list(duplicate[2] for duplicate in duplicates)
return (paths, formats, metadata), (ids if return_ids else
len(ids))
return None, (ids if return_ids else len(ids))
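# Hedged usage sketch (illustrative, not part of the original file):
#
#   duplicates, ids = db.add_books(paths, formats, metadata, return_ids=True)
#   if duplicates:
#       dup_paths, dup_formats, dup_metadata = duplicates
#
# When there are no duplicates the first element of the returned tuple is
# None; with return_ids=False the second element is the count of added books.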
def import_book(self, mi, formats, notify=True, import_hooks=True,
apply_import_tags=True, preserve_uuid=False):
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
if apply_import_tags:
self._add_newbook_tag(mi)
if not mi.title:
mi.title = _('Unknown')
if not mi.authors:
mi.authors = [_('Unknown')]
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
if isinstance(aus, str):
aus = aus.decode(preferred_encoding, 'replace')
title = mi.title if isinstance(mi.title, unicode) else \
mi.title.decode(preferred_encoding, 'replace')
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
self.data.books_added([id], self)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, ignore_errors=True, commit=True)
if preserve_uuid and mi.uuid:
self.set_uuid(id, mi.uuid, commit=False)
for path in formats:
ext = os.path.splitext(path)[1][1:].lower()
if ext == 'opf':
continue
if import_hooks:
self.add_format_with_hooks(id, ext, path, index_is_id=True)
else:
with lopen(path, 'rb') as f:
self.add_format(id, ext, f, index_is_id=True)
# Mark the book dirty. This has probably already been done by
# set_metadata, but that is not guaranteed to be enough
self.dirtied([id], commit=False)
self.conn.commit()
self.data.refresh_ids(self, [id]) # Needed to update format list and size
if notify:
self.notify('add', [id])
return id
def get_top_level_move_items(self):
items = set(os.listdir(self.library_path))
paths = set([])
for x in self.data.universal_set():
path = self.path(x, index_is_id=True)
path = path.split(os.sep)[0]
paths.add(path)
paths.add('metadata.db')
path_map = {}
for x in paths:
path_map[x] = x
if not self.is_case_sensitive:
for x in items:
path_map[x.lower()] = x
items = set(path_map)
paths = set([x.lower() for x in paths])
items = items.intersection(paths)
return items, path_map
def move_library_to(self, newloc, progress=lambda x: x):
if not os.path.exists(newloc):
os.makedirs(newloc)
old_dirs = set([])
items, path_map = self.get_top_level_move_items()
for x in items:
src = os.path.join(self.library_path, x)
dest = os.path.join(newloc, path_map[x])
if os.path.isdir(src):
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
old_dirs.add(src)
else:
if os.path.exists(dest):
os.remove(dest)
shutil.copyfile(src, dest)
x = path_map[x]
if not isinstance(x, unicode):
x = x.decode(filesystem_encoding, 'replace')
progress(x)
dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
opath = self.dbpath
self.conn.close()
self.library_path, self.dbpath = newloc, dbpath
self.connect()
try:
os.unlink(opath)
except:
pass
for dir in old_dirs:
try:
shutil.rmtree(dir)
except:
pass
def __iter__(self):
for record in self.data._data:
if record is not None:
yield record
def all_ids(self):
x = self.FIELD_MAP['id']
for i in iter(self):
yield i[x]
def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None):
'''
Return all metadata stored in the database as a dict. Includes paths to
the cover and each format.
:param prefix: The prefix for all paths. By default, the prefix is the absolute path
to the library folder.
:param ids: Set of ids to return the data for. If None, return data for
all entries in the database.
'''
if prefix is None:
prefix = self.library_path
fdata = self.custom_column_num_map
FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher',
'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
'languages']).union(set(fdata))
for x, data in fdata.iteritems():
if data['datatype'] == 'series':
FIELDS.add('%d_index'%x)
data = []
for record in self.data:
if record is None:
continue
db_id = record[self.FIELD_MAP['id']]
if ids is not None and db_id not in ids:
continue
x = {}
for field in FIELDS:
x[field] = record[self.FIELD_MAP[field]]
data.append(x)
x['id'] = db_id
x['formats'] = []
isbn = self.isbn(db_id, index_is_id=True)
x['isbn'] = isbn if isbn else ''
if not x['authors']:
x['authors'] = _('Unknown')
x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]
if authors_as_string:
x['authors'] = authors_to_string(x['authors'])
x['tags'] = [i.replace('|', ',').strip() for i in x['tags'].split(',')] if x['tags'] else []
path = os.path.join(prefix, self.path(record[self.FIELD_MAP['id']], index_is_id=True))
x['cover'] = os.path.join(path, 'cover.jpg')
if not record[self.FIELD_MAP['cover']]:
x['cover'] = None
formats = self.formats(record[self.FIELD_MAP['id']], index_is_id=True)
if formats:
for fmt in formats.split(','):
path = self.format_abspath(x['id'], fmt, index_is_id=True)
if path is None:
continue
if prefix != self.library_path:
path = os.path.relpath(path, self.library_path)
path = os.path.join(prefix, path)
x['formats'].append(path)
x['fmt_'+fmt.lower()] = path
x['available_formats'] = [i.upper() for i in formats.split(',')]
return data
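# Illustrative sketch of the returned structure (keys taken from the code
# above; the values shown are made up):
#
#   [{'id': 1, 'title': u'Dune', 'authors': [u'Frank Herbert'],
#     'tags': [u'SF'], 'isbn': u'9780441013593',
#     'cover': u'/library/Frank Herbert/Dune (1)/cover.jpg',
#     'formats': [u'/library/Frank Herbert/Dune (1)/Dune.epub'],
#     'fmt_epub': u'/library/Frank Herbert/Dune (1)/Dune.epub',
#     'available_formats': [u'EPUB'], ...}, ...]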
def migrate_old(self, db, progress):
from PyQt4.QtCore import QCoreApplication
header = _(u'<p>Migrating old database to ebook library in %s<br><center>')%self.library_path
progress.setValue(0)
progress.setLabelText(header)
QCoreApplication.processEvents()
db.conn.row_factory = lambda cursor, row: tuple(row)
db.conn.text_factory = lambda x: unicode(x, 'utf-8', 'replace')
books = db.conn.get('SELECT id, title, sort, timestamp, series_index, author_sort, isbn FROM books ORDER BY id ASC')
progress.setAutoReset(False)
progress.setRange(0, len(books))
for book in books:
self.conn.execute('INSERT INTO books(id, title, sort, timestamp, series_index, author_sort, isbn) VALUES(?, ?, ?, ?, ?, ?, ?);', book)
tables = '''
authors ratings tags series books_tags_link
comments publishers
books_authors_link conversion_options
books_publishers_link
books_ratings_link
books_series_link feeds
'''.split()
for table in tables:
rows = db.conn.get('SELECT * FROM %s ORDER BY id ASC'%table)
for row in rows:
self.conn.execute('INSERT INTO %s VALUES(%s)'%(table, ','.join(repeat('?', len(row)))), row)
self.conn.commit()
self.refresh('timestamp', True)
for i, book in enumerate(books):
progress.setLabelText(header+_(u'Copying <b>%s</b>')%book[1])
id = book[0]
self.set_path(id, True)
formats = db.formats(id, index_is_id=True)
if not formats:
formats = []
else:
formats = formats.split(',')
for format in formats:
data = db.format(id, format, index_is_id=True)
if data:
self.add_format(id, format, cStringIO.StringIO(data), index_is_id=True)
cover = db.cover(id, index_is_id=True)
if cover:
self.set_cover(id, cover)
progress.setValue(i+1)
self.conn.commit()
progress.setLabelText(_('Compacting database'))
self.vacuum()
progress.reset()
return len(books)
def find_books_in_directory(self, dirpath, single_book_per_directory):
dirpath = os.path.abspath(dirpath)
if single_book_per_directory:
formats = []
for path in os.listdir(dirpath):
path = os.path.abspath(os.path.join(dirpath, path))
if os.path.isdir(path) or not os.access(path, os.R_OK):
continue
ext = os.path.splitext(path)[1]
if not ext:
continue
ext = ext[1:].lower()
if ext not in BOOK_EXTENSIONS and ext != 'opf':
continue
formats.append(path)
yield formats
else:
books = {}
for path in os.listdir(dirpath):
path = os.path.abspath(os.path.join(dirpath, path))
if os.path.isdir(path) or not os.access(path, os.R_OK):
continue
ext = os.path.splitext(path)[1]
if not ext:
continue
ext = ext[1:].lower()
if ext not in BOOK_EXTENSIONS:
continue
key = os.path.splitext(path)[0]
if key not in books:
books[key] = []
books[key].append(path)
for formats in books.values():
yield formats
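# Illustrative note (not in the original file): in multi-book mode files are
# grouped by their path minus the extension, so a directory containing
#
#   /books/dune.epub  /books/dune.mobi  /books/hyperion.epub
#
# yields two format groups: ['/books/dune.epub', '/books/dune.mobi'] and
# ['/books/hyperion.epub'].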
def import_book_directory_multiple(self, dirpath, callback=None,
added_ids=None):
from calibre.ebooks.metadata.meta import metadata_from_formats
duplicates = []
for formats in self.find_books_in_directory(dirpath, False):
mi = metadata_from_formats(formats)
if mi.title is None:
continue
if self.has_book(mi):
duplicates.append((mi, formats))
continue
book_id = self.import_book(mi, formats)
if added_ids is not None:
added_ids.add(book_id)
if callable(callback):
if callback(mi.title):
break
return duplicates
def import_book_directory(self, dirpath, callback=None, added_ids=None):
from calibre.ebooks.metadata.meta import metadata_from_formats
dirpath = os.path.abspath(dirpath)
formats = self.find_books_in_directory(dirpath, True)
formats = list(formats)[0]
if not formats:
return
mi = metadata_from_formats(formats)
if mi.title is None:
return
if self.has_book(mi):
return [(mi, formats)]
book_id = self.import_book(mi, formats)
if added_ids is not None:
added_ids.add(book_id)
if callable(callback):
callback(mi.title)
def recursive_import(self, root, single_book_per_directory=True,
callback=None, added_ids=None):
root = os.path.abspath(root)
duplicates = []
for dirpath in os.walk(root):
res = (self.import_book_directory(dirpath[0], callback=callback,
added_ids=added_ids) if single_book_per_directory else
self.import_book_directory_multiple(dirpath[0],
callback=callback, added_ids=added_ids))
if res is not None:
duplicates.extend(res)
if callable(callback):
if callback(''):
break
return duplicates
def add_custom_book_data(self, book_id, name, val):
x = self.conn.get('SELECT id FROM books WHERE id=?', (book_id,), all=False)
if x is None:
raise ValueError('add_custom_book_data: no such book_id %d'%book_id)
# Do the json encode first, in case it throws an exception
s = json.dumps(val, default=to_json)
self.conn.execute('''INSERT OR REPLACE INTO books_plugin_data(book, name, val)
VALUES(?, ?, ?)''', (book_id, name, s))
self.commit()
def add_multiple_custom_book_data(self, name, vals, delete_first=False):
if delete_first:
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.conn.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
for book_id, val in vals.iteritems()])
self.commit()
def get_custom_book_data(self, book_id, name, default=None):
try:
s = self.conn.get('''select val FROM books_plugin_data
WHERE book=? AND name=?''', (book_id, name), all=False)
if s is None:
return default
return json.loads(s, object_hook=from_json)
except:
pass
return default
def get_all_custom_book_data(self, name, default=None):
try:
s = self.conn.get('''select book, val FROM books_plugin_data
WHERE name=?''', (name,))
if s is None:
return default
res = {}
for r in s:
res[r[0]] = json.loads(r[1], object_hook=from_json)
return res
except:
pass
return default
def delete_custom_book_data(self, book_id, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE book=? AND name=?',
(book_id, name))
self.commit()
def delete_all_custom_book_data(self, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.commit()
def get_ids_for_custom_book_data(self, name):
s = self.conn.get('''SELECT book FROM books_plugin_data WHERE name=?''', (name,))
return [x[0] for x in s]
def get_usage_count_by_id(self, field):
fm = self.field_metadata[field]
if not fm.get('link_column', None):
raise ValueError('%s is not an is_multiple field' % field)
return self.conn.get(
'SELECT {0}, count(*) FROM books_{1}_link GROUP BY {0}'.format(
fm['link_column'], fm['table']))
def all_author_names(self):
ai = self.FIELD_MAP['authors']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
for x in auts.split(','):
ans.add(x.replace('|', ','))
return ans
def all_tag_names(self):
ai = self.FIELD_MAP['tags']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
for x in auts.split(','):
ans.add(x)
return ans
def all_publisher_names(self):
ai = self.FIELD_MAP['publisher']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
ans.add(auts)
return ans
def all_series_names(self):
ai = self.FIELD_MAP['series']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
ans.add(auts)
return ans<|fim▁end|>
|
if notify:
|
<|file_name|>access_control_config.py<|end_file_name|><|fim▁begin|>## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Access Control Config. """
__revision__ = \
"$Id$"
# pylint: disable=C0301
from invenio import config
from invenio.config import CFG_SITE_NAME, CFG_SITE_URL, CFG_SITE_LANG, \
CFG_SITE_SECURE_URL, CFG_SITE_SUPPORT_EMAIL, CFG_CERN_SITE, \
CFG_OPENAIRE_SITE, CFG_SITE_RECORD, CFG_INSPIRE_SITE, \
CFG_SITE_ADMIN_EMAIL
from invenio.messages import gettext_set_language
class InvenioWebAccessFireroleError(Exception):
"""Just an Exception to discover if it's a FireRole problem"""
pass
# VALUES TO BE EXPORTED
# CURRENTLY USED BY THE FILES access_control_engine.py access_control_admin.py webaccessadmin_lib.py
# name of the role giving superadmin rights
SUPERADMINROLE = 'superadmin'
# name of the webaccess webadmin role
WEBACCESSADMINROLE = 'webaccessadmin'
# name of the action allowing roles to access the web administrator interface
WEBACCESSACTION = 'cfgwebaccess'
# name of the action allowing roles to view restricted collections
VIEWRESTRCOLL = 'viewrestrcoll'
# name of the action allowing roles to delegate the rights to other roles
# ex: libraryadmin to delegate libraryworker
DELEGATEADDUSERROLE = 'accdelegaterole'
# max number of users to display in the drop down selects
MAXSELECTUSERS = 25
# max number of users to display in a page (mainly for user area)
MAXPAGEUSERS = 25
# default role definition, source:
CFG_ACC_EMPTY_ROLE_DEFINITION_SRC = 'deny all'
# default role definition, compiled:
CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ = (False, ())
# default role definition, compiled and serialized:
CFG_ACC_EMPTY_ROLE_DEFINITION_SER = None
# List of tags containing (multiple) emails of users who should be authorized
# to access the corresponding record regardless of collection restrictions.
if CFG_CERN_SITE:
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['859__f', '270__m']
else:
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['8560_f']
if CFG_CERN_SITE:
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS = ['506__m']
else:
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS = []
# Use external source for access control?
# CFG_EXTERNAL_AUTHENTICATION -- this is a dictionary with the enabled login
# methods. The key is the name of the login method and the value is an
# instance of the login method (see /help/admin/webaccess-admin-guide#5). Set
# the value to None if you wish to use the local Invenio authentication method.
# CFG_EXTERNAL_AUTH_DEFAULT -- set this to the key in CFG_EXTERNAL_AUTHENTICATION
# that should be considered as default login method
# CFG_EXTERNAL_AUTH_USING_SSO -- set this to the login method name of an SSO
# login method, if any, otherwise set this to None.
# CFG_EXTERNAL_AUTH_LOGOUT_SSO -- if CFG_EXTERNAL_AUTH_USING_SSO was not None
# set this to the URL that should be contacted to perform an SSO logout
from invenio.external_authentication_robot import ExternalAuthRobot
if CFG_CERN_SITE:
from invenio import external_authentication_sso as ea_sso
CFG_EXTERNAL_AUTH_USING_SSO = "CERN"
CFG_EXTERNAL_AUTH_DEFAULT = CFG_EXTERNAL_AUTH_USING_SSO
CFG_EXTERNAL_AUTH_LOGOUT_SSO = 'https://login.cern.ch/adfs/ls/?wa=wsignout1.0'
CFG_EXTERNAL_AUTHENTICATION = {
CFG_EXTERNAL_AUTH_USING_SSO : ea_sso.ExternalAuthSSO(),
}
elif CFG_OPENAIRE_SITE:
CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
CFG_EXTERNAL_AUTHENTICATION = {
"Local": None,
"OpenAIRE": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False, external_id_attribute_name="id"),
}
elif CFG_INSPIRE_SITE:
# INSPIRE specific robot configuration
CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
CFG_EXTERNAL_AUTHENTICATION = {
"Local": None,
"Robot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False, check_user_ip=2, external_id_attribute_name='personid'),
"ZRobot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True, check_user_ip=2, external_id_attribute_name='personid')
}
else:
CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
CFG_EXTERNAL_AUTHENTICATION = {
"Local": None,
"Robot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False),
"ZRobot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True)
}
# CFG_TEMP_EMAIL_ADDRESS
# Temporary email address for logging in with an OpenID/OAuth provider that
# doesn't supply an email address
CFG_TEMP_EMAIL_ADDRESS = "%s@NOEMAIL"
# CFG_OPENID_PROVIDERS
# CFG_OAUTH1_PROVIDERS
# CFG_OAUTH2_PROVIDERS
# Choose which providers you want to use. Some providers don't supply an
# email address; if you choose them, the users will be registered with a
# temporary email address like CFG_TEMP_EMAIL_ADDRESS % randomstring
#
# Order of the login buttons can be changed by CFG_EXTERNAL_LOGIN_BUTTON_ORDER
# in invenio.websession_config
CFG_OPENID_PROVIDERS = [
'google',
'yahoo',
'aol',
'wordpress',
'myvidoop',
'openid',
'verisign',
'myopenid',
'myspace',
'livejournal',
'blogger'
]
CFG_OAUTH1_PROVIDERS = [
'twitter',
'linkedin',
'flickr'
]
CFG_OAUTH2_PROVIDERS = [
'facebook',
'yammer',
'foursquare',
'googleoauth2',
'instagram',
'orcid'
]
# CFG_OPENID_CONFIGURATIONS
# identifier: (required) identifier url. {0} will be replaced by username (an
# input).
# trust_email: (optional, default: False) Some providers let their users
# change their email address on the login page. If the provider does not
# allow this, set trust_email to True.
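# Illustrative sketch (an assumption, not from the original file): the
# 'identifier' template is expanded with the username entered on the login
# page, e.g.
#
#   CFG_OPENID_CONFIGURATIONS['wordpress']['identifier'].format('alice')
#   # -> 'alice.wordpress.com'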
CFG_OPENID_CONFIGURATIONS = {
'openid': {
'identifier': '{0}'
},
'myvidoop': {
'identifier': '{0}.myvidoop.com'
},
'google': {
'identifier': 'https://www.google.com/accounts/o8/id',
'trust_email': True
},
'wordpress': {
'identifier': '{0}.wordpress.com'
},
'aol': {
'identifier': 'openid.aol.com/{0}',
'trust_email': True
},
'myopenid': {
'identifier': '{0}.myopenid.com'
},<|fim▁hole|> 'yahoo': {
'identifier': 'yahoo.com',
'trust_email': True
},
'verisign': {
'identifier': '{0}.pip.verisignlabs.com'
},
'myspace': {
'identifier': 'www.myspace.com/{0}'
},
'livejournal': {
'identifier': '{0}.livejournal.com'
},
'blogger': {
'identifier': '{0}'
}
}
# CFG_OAUTH1_CONFIGURATIONS
#
# !!IMPORTANT!!
# While creating an app in the provider site, the callback uri (redirect uri)
# must be in the form of :
# CFG_SITE_SECURE_URL/youraccount/login?login_method=oauth1&provider=PROVIDERNAME
#
# consumer_key: required
# Consumer key taken from provider.
#
# consumer_secret: required
# Consumer secret taken from provider.
#
# authorize_url: required
# The url to redirect the user for authorization
#
# authorize_parameters: optional
# Additional parameters for authorize_url (e.g. scope)
#
# request_token_url: required
# The url to get request token
#
# access_token_url: required
# The url to exchange the request token with the access token
#
# request_url: optional
# The url to gather the user information
#
# request_parameters: optional
# Additional parameters for request_url
#
# email, nickname: optional
# id: required
# The location of these properties within the response returned by the
# provider.
# example:
# if the response is:
# {
# 'user': {
# 'user_name': 'ABC',
# 'contact': [
# {
# 'email': '[email protected]'
# }
# ]
# },
# 'user_id': 'XXX',
# }
# then:
# email must be : ['user', 'contact', 0, 'email']
# id must be: ['user_id']
# nickname must be: ['user', 'user_name']
#
# debug: optional
# When the debug key is set to 1, the JSON object returned from the
# provider is displayed on the screen after the login process. It can be
# used to find where the id, email or nickname is located.
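# Hypothetical helper showing how such a path list would be resolved against
# the provider's JSON response (for illustration only; the real lookup lives
# in the external authentication plugins):
#
#   def _resolve(response, path):
#       for key in path:
#           response = response[key]
#       return response
#
#   # _resolve(response, ['user', 'contact', 0, 'email']) -> '[email protected]'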
CFG_OAUTH1_CONFIGURATIONS = {
'twitter': {
'consumer_key' : '',
'consumer_secret' : '',
'request_token_url' : 'https://api.twitter.com/oauth/request_token',
'access_token_url' : 'https://api.twitter.com/oauth/access_token',
'authorize_url' : 'https://api.twitter.com/oauth/authorize',
'id': ['user_id'],
'nickname': ['screen_name']
},
'flickr': {
'consumer_key' : '',
'consumer_secret' : '',
'request_token_url' : 'http://www.flickr.com/services/oauth/request_token',
'access_token_url' : 'http://www.flickr.com/services/oauth/access_token',
'authorize_url' : 'http://www.flickr.com/services/oauth/authorize',
'authorize_parameters': {
'perms': 'read'
},
'nickname': ['username'],
'id': ['user_nsid']
},
'linkedin': {
'consumer_key' : '',
'consumer_secret' : '',
'request_token_url' : 'https://api.linkedin.com/uas/oauth/requestToken',
'access_token_url' : 'https://api.linkedin.com/uas/oauth/accessToken',
'authorize_url' : 'https://www.linkedin.com/uas/oauth/authorize',
'request_url': 'http://api.linkedin.com/v1/people/~:(id)',
'request_parameters': {
'format': 'json'
},
'id': ['id']
}
}
# CFG_OAUTH2_CONFIGURATIONS
#
# !!IMPORTANT!!
# While creating an app in the provider site, the callback uri (redirect uri)
# must be in the form of :
# CFG_SITE_SECURE_URL/youraccount/login?login_method=oauth2&provider=PROVIDERNAME
#
# consumer_key: required
# Consumer key taken from provider.
#
# consumer_secret: required
# Consumer secret taken from provider.
#
# authorize_url: required
# The url to redirect the user for authorization
#
# authorize_parameters:
# Additional parameters for authorize_url (like scope)
#
# access_token_url: required
# The url to get the access token.
#
# request_url: required
# The url to gather the user information.
# {access_token} will be replaced by access token
#
# email, nickname: optional
# id: required
# The location of these properties within the response returned by the
# provider.
# !! See the example in CFG_OAUTH1_CONFIGURATIONS !!
#
# debug: optional
# When the debug key is set to 1, the JSON object returned from the
# provider is displayed on the screen after the login process. It can be
# used to find where the id, email or nickname is located.
CFG_OAUTH2_CONFIGURATIONS = {
'facebook': {
'consumer_key': '118319526393',
'consumer_secret': '8d675eb0ef89f2f8fbbe4ee56ab473c6',
'access_token_url': 'https://graph.facebook.com/oauth/access_token',
'authorize_url': 'https://www.facebook.com/dialog/oauth',
'authorize_parameters': {
'scope': 'email'
},
'request_url' : 'https://graph.facebook.com/me?access_token={access_token}',
'email': ['email'],
'id': ['id'],
'nickname': ['username']
},
'foursquare': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://foursquare.com/oauth2/access_token',
'authorize_url': 'https://foursquare.com/oauth2/authorize',
'request_url': 'https://api.foursquare.com/v2/users/self?oauth_token={access_token}',
'id': ['response', 'user', 'id'],
'email': ['response', 'user', 'contact', 'email']
},
'yammer': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://www.yammer.com/oauth2/access_token.json',
'authorize_url': 'https://www.yammer.com/dialog/oauth',
'request_url': 'https://www.yammer.com/oauth2/access_token.json?access_token={access_token}',
'email':['user', 'contact', 'email_addresses', 0, 'address'],
'id': ['user', 'id'],
'nickname': ['user', 'name']
},
'googleoauth2': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
'authorize_parameters': {
'scope': 'https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email'
},
'request_url': 'https://www.googleapis.com/oauth2/v1/userinfo?access_token={access_token}',
'email':['email'],
'id': ['id']
},
'instagram': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://api.instagram.com/oauth/access_token',
'authorize_url': 'https://api.instagram.com/oauth/authorize/',
'authorize_parameters': {
'scope': 'basic'
},
'id': ['user', 'id'],
'nickname': ['user', 'username']
},
'orcid': {
'consumer_key': '',
'consumer_secret': '',
'authorize_url': 'http://sandbox-1.orcid.org/oauth/authorize',
'access_token_url': 'http://api.sandbox-1.orcid.org/oauth/token',
'request_url': 'http://api.sandbox-1.orcid.org/{id}/orcid-profile',
'authorize_parameters': {
'scope': '/orcid-profile/read-limited',
'response_type': 'code',
'access_type': 'offline',
},
'id': ['orcid'],
}
}
## Let's override OpenID/OAuth1/OAuth2 configuration from invenio(-local).conf
CFG_OPENID_PROVIDERS = config.CFG_OPENID_PROVIDERS
CFG_OAUTH1_PROVIDERS = config.CFG_OAUTH1_PROVIDERS
CFG_OAUTH2_PROVIDERS = config.CFG_OAUTH2_PROVIDERS
if config.CFG_OPENID_CONFIGURATIONS:
for provider, configuration in config.CFG_OPENID_CONFIGURATIONS.items():
if provider in CFG_OPENID_CONFIGURATIONS:
CFG_OPENID_CONFIGURATIONS[provider].update(configuration)
else:
CFG_OPENID_CONFIGURATIONS[provider] = configuration
if config.CFG_OAUTH1_CONFIGURATIONS:
for provider, configuration in config.CFG_OAUTH1_CONFIGURATIONS.items():
if provider in CFG_OAUTH1_CONFIGURATIONS:
CFG_OAUTH1_CONFIGURATIONS[provider].update(configuration)
else:
CFG_OAUTH1_CONFIGURATIONS[provider] = configuration
if config.CFG_OAUTH2_CONFIGURATIONS:
for provider, configuration in config.CFG_OAUTH2_CONFIGURATIONS.items():
if provider in CFG_OAUTH2_CONFIGURATIONS:
CFG_OAUTH2_CONFIGURATIONS[provider].update(configuration)
else:
CFG_OAUTH2_CONFIGURATIONS[provider] = configuration
# If OpenID authentication is enabled, add 'openid' to login methods
CFG_OPENID_AUTHENTICATION = bool(CFG_OPENID_PROVIDERS)
if CFG_OPENID_AUTHENTICATION:
from invenio.external_authentication_openid import ExternalOpenID
CFG_EXTERNAL_AUTHENTICATION['openid'] = ExternalOpenID(enforce_external_nicknames=True)
# If OAuth1 authentication is enabled, add 'oauth1' to login methods.
CFG_OAUTH1_AUTHENTICATION = bool(CFG_OAUTH1_PROVIDERS)
if CFG_OAUTH1_PROVIDERS:
from invenio.external_authentication_oauth1 import ExternalOAuth1
CFG_EXTERNAL_AUTHENTICATION['oauth1'] = ExternalOAuth1(enforce_external_nicknames=True)
# If OAuth2 authentication is enabled, add 'oauth2' to login methods.
CFG_OAUTH2_AUTHENTICATION = bool(CFG_OAUTH2_PROVIDERS)
if CFG_OAUTH2_AUTHENTICATION:
from invenio.external_authentication_oauth2 import ExternalOAuth2
CFG_EXTERNAL_AUTHENTICATION['oauth2'] = ExternalOAuth2(enforce_external_nicknames=True)
## If using SSO, this is the number of seconds after which the keep-alive
## SSO handler is pinged again to provide fresh SSO information.
CFG_EXTERNAL_AUTH_SSO_REFRESH = 600
# default data for the add_default_settings function
# Note: by default the definition is set to deny any. This won't be a problem
# because user ids directly connected with roles will still be allowed.
# roles
# name description definition
DEF_ROLES = ((SUPERADMINROLE, 'superuser with all rights', 'deny any'),
(WEBACCESSADMINROLE, 'WebAccess administrator', 'deny any'),
('anyuser', 'Any user', 'allow any'),
('basketusers', 'Users who can use baskets', 'allow any'),
('loanusers', 'Users who can use loans', 'allow any'),
('groupusers', 'Users who can use groups', 'allow any'),
('alertusers', 'Users who can use alerts', 'allow any'),
('messageusers', 'Users who can use messages', 'allow any'),
('holdingsusers', 'Users who can view holdings', 'allow any'),
('statisticsusers', 'Users who can view statistics', 'allow any'),
('claimpaperusers', 'Users who can perform changes to their own paper attributions without the need for an operator\'s approval', 'allow any'),
('claimpaperoperators', 'Users who can perform changes to _all_ paper attributions without the need for an operator\'s approval', 'deny any'),
('paperclaimviewers', 'Users who can view "claim my paper" facilities.', 'allow all'),
('paperattributionviewers', 'Users who can view "attribute this paper" facilities', 'allow all'),
('paperattributionlinkviewers', 'Users who can see attribution links in the search', 'allow all'),
)
# Demo site roles
DEF_DEMO_ROLES = (('photocurator', 'Photo collection curator', 'deny any'),
('thesesviewer', 'Theses and Drafts viewer', 'allow group "Theses and Drafts viewers"'),
('ALEPHviewer', 'ALEPH viewer', 'allow group "ALEPH viewers"'),
('ISOLDEnotesviewer', 'ISOLDE Internal Notes viewer', 'allow group "ISOLDE Internal Notes viewers"'),
('thesescurator', 'Theses collection curator', 'deny any'),
('swordcurator', 'BibSword client curator', 'deny any'),
('referee_DEMOBOO_*', 'Book collection curator', 'deny any'),
('restrictedpicturesviewer', 'Restricted pictures viewer', 'deny any'),
('curator', 'Curator', 'deny any'),
('basketusers', 'Users who can use baskets', 'deny email "[email protected]"\nallow any'),
('claimpaperusers', 'Users who can perform changes to their own paper attributions without the need for an operator\'s approval', 'deny email "[email protected]"\nallow any'),
('submit_DEMOJRN_*', 'Users who can submit (and modify) "Atlantis Times" articles', 'deny all'),
('atlantiseditor', 'Users who can configure "Atlantis Times" journal', 'deny all'),
('commentmoderator', 'Users who can moderate comments', 'deny all'),
('poetrycommentreader', 'Users who can view comments in Poetry collection', 'deny all'))
DEF_DEMO_USER_ROLES = (('[email protected]', 'thesesviewer'),
('[email protected]', 'ALEPHviewer'),
('[email protected]', 'ISOLDEnotesviewer'),
('[email protected]', 'swordcurator'),
('[email protected]', 'claimpaperusers'),
('[email protected]', 'referee_DEMOBOO_*'),
('[email protected]', 'curator'),
('[email protected]', 'restrictedpicturesviewer'),
('[email protected]', 'swordcurator'),
('[email protected]', 'thesescurator'),
('[email protected]', 'restrictedpicturesviewer'),
('[email protected]', 'photocurator'),
('[email protected]', 'submit_DEMOJRN_*'),
('[email protected]', 'submit_DEMOJRN_*'),
('[email protected]', 'atlantiseditor'),
('[email protected]', 'poetrycommentreader'))
# users
# list of e-mail addresses
DEF_USERS = []
# actions
# name desc allowedkeywords optional
DEF_ACTIONS = (
('cfgwebsearch', 'configure WebSearch', '', 'no'),
('cfgbibformat', 'configure BibFormat', '', 'no'),
('cfgbibknowledge', 'configure BibKnowledge', '', 'no'),
('cfgwebsubmit', 'configure WebSubmit', '', 'no'),
('cfgbibrank', 'configure BibRank', '', 'no'),
('cfgwebcomment', 'configure WebComment', '', 'no'),
('cfgweblinkback', 'configure WebLinkback' , '', 'no'),
('cfgoaiharvest', 'configure OAI Harvest', '', 'no'),
('cfgoairepository', 'configure OAI Repository', '', 'no'),
('cfgbibindex', 'configure BibIndex', '', 'no'),
('cfgbibexport', 'configure BibExport', '', 'no'),
('cfgrobotkeys', 'configure Robot keys', 'login_method,robot', 'yes'),
('cfgbibsort', 'configure BibSort', '', 'no'),
('runbibindex', 'run BibIndex', '', 'no'),
('runbibupload', 'run BibUpload', '', 'no'),
('runwebcoll', 'run webcoll', 'collection', 'yes'),
('runbibformat', 'run BibFormat', 'format', 'yes'),
('runbibclassify', 'run BibClassify', 'taxonomy', 'yes'),
('runbibtaskex', 'run BibTaskEx example', '', 'no'),
('runbibrank', 'run BibRank', '', 'no'),
('runoaiharvest', 'run oaiharvest task', '', 'no'),
('runoairepository', 'run oairepositoryupdater task', '', 'no'),
('runbibedit', 'run Record Editor', 'collection', 'yes'),
('runbibeditmulti', 'run Multi-Record Editor', '', 'no'),
('runbibdocfile', 'run Document File Manager', '', 'no'),
('runbibmerge', 'run Record Merger', '', 'no'),
('runbibswordclient', 'run BibSword client', '', 'no'),
('runwebstatadmin', 'run WebStatAdmin', '', 'no'),
('runinveniogc', 'run InvenioGC', '', 'no'),
('runbibexport', 'run BibExport', '', 'no'),
('referee', 'referee document type doctype/category categ', 'doctype,categ', 'yes'),
('submit', 'use webSubmit', 'doctype,act,categ', 'yes'),
('viewrestrdoc', 'view restricted document', 'status', 'no'),
('viewrestrcomment', 'view restricted comment', 'status', 'no'),
(WEBACCESSACTION, 'configure WebAccess', '', 'no'),
(DELEGATEADDUSERROLE, 'delegate subroles inside WebAccess', 'role', 'no'),
(VIEWRESTRCOLL, 'view restricted collection', 'collection', 'no'),
('cfgwebjournal', 'configure WebJournal', 'name,with_editor_rights', 'no'),
('viewcomment', 'view comments', 'collection', 'no'),
('viewlinkbacks', 'view linkbacks', 'collection', 'no'),
('sendcomment', 'send comments', 'collection', 'no'),
('attachcommentfile', 'attach files to comments', 'collection', 'no'),
('attachsubmissionfile', 'upload files to drop box during submission', '', 'no'),
('usebaskets', 'use baskets', '', 'no'),
('useloans', 'use loans', '', 'no'),
('usegroups', 'use groups', '', 'no'),
('usealerts', 'use alerts', '', 'no'),
('usemessages', 'use messages', '', 'no'),
('viewholdings', 'view holdings', 'collection', 'yes'),
('viewstatistics', 'view statistics', 'collection', 'yes'),
('runbibcirculation', 'run BibCirculation', '', 'no'),
('moderatecomments', 'moderate comments', 'collection', 'no'),
('moderatelinkbacks', 'moderate linkbacks', 'collection', 'no'),
('runbatchuploader', 'run batchuploader', 'collection', 'yes'),
('runbibtasklet', 'run BibTaskLet', '', 'no'),
('claimpaper_view_pid_universe', 'View the Claim Paper interface', '', 'no'),
('claimpaper_claim_own_papers', 'Claim papers to one\'s own person ID', '', 'no'),
('claimpaper_claim_others_papers', 'Claim papers for others', '', 'no'),
('claimpaper_change_own_data', 'Change data associated with one\'s own person ID', '', 'no'),
('claimpaper_change_others_data', 'Change data of any person ID', '', 'no'),
('cfgbibsched', 'configure BibSched', '', 'no')
)
# Default authorizations
# role action arguments
DEF_AUTHS = (('basketusers', 'usebaskets', {}),
('loanusers', 'useloans', {}),
('groupusers', 'usegroups', {}),
('alertusers', 'usealerts', {}),
('messageusers', 'usemessages', {}),
('holdingsusers', 'viewholdings', {}),
('statisticsusers', 'viewstatistics', {}),
('claimpaperusers', 'claimpaper_view_pid_universe', {}),
('claimpaperoperators', 'claimpaper_view_pid_universe', {}),
('claimpaperusers', 'claimpaper_claim_own_papers', {}),
('claimpaperoperators', 'claimpaper_claim_own_papers', {}),
('claimpaperoperators', 'claimpaper_claim_others_papers', {}),
('claimpaperusers', 'claimpaper_change_own_data', {}),
('claimpaperoperators', 'claimpaper_change_own_data', {}),
('claimpaperoperators', 'claimpaper_change_others_data', {}),
)
# Demo site authorizations
# role action arguments
DEF_DEMO_AUTHS = (
('photocurator', 'runwebcoll', {'collection': 'Pictures'}),
('restrictedpicturesviewer', 'viewrestrdoc', {'status': 'restricted_picture'}),
('thesesviewer', VIEWRESTRCOLL, {'collection': 'Theses'}),
('thesesviewer', VIEWRESTRCOLL, {'collection': 'Drafts'}),
('ALEPHviewer', VIEWRESTRCOLL, {'collection': 'ALEPH Theses'}),
('ALEPHviewer', VIEWRESTRCOLL, {'collection': 'ALEPH Internal Notes'}),
('ISOLDEnotesviewer', VIEWRESTRCOLL, {'collection': 'ISOLDE Internal Notes'}),
('referee_DEMOBOO_*', 'referee', {'doctype': 'DEMOBOO', 'categ': '*'}),
('curator', 'cfgbibknowledge', {}),
('curator', 'runbibedit', {}),
('curator', 'runbibeditmulti', {}),
('curator', 'runbibmerge', {}),
('swordcurator', 'runbibswordclient', {}),
('thesescurator', 'runbibedit', {'collection': 'Theses'}),
('thesescurator', VIEWRESTRCOLL, {'collection': 'Theses'}),
('photocurator', 'runbibedit', {'collection': 'Pictures'}),
('referee_DEMOBOO_*', 'runbibedit', {'collection': 'Books'}),
('submit_DEMOJRN_*', 'submit', {'doctype': 'DEMOJRN', 'act': 'SBI', 'categ': '*'}),
('submit_DEMOJRN_*', 'submit', {'doctype': 'DEMOJRN', 'act': 'MBI', 'categ': '*'}),
('submit_DEMOJRN_*', 'cfgwebjournal', {'name': 'AtlantisTimes', 'with_editor_rights': 'no'}),
('atlantiseditor', 'cfgwebjournal', {'name': 'AtlantisTimes', 'with_editor_rights': 'yes'}),
('referee_DEMOBOO_*', 'runbatchuploader', {'collection': 'Books'}),
('poetrycommentreader', 'viewcomment', {'collection': 'Poetry'}),
('atlantiseditor', VIEWRESTRCOLL, {'collection': 'Atlantis Times Drafts'}),
('anyuser', 'submit', {'doctype': 'DEMOART', 'act': 'SBI', 'categ': 'ARTICLE'}),
)
_ = gettext_set_language(CFG_SITE_LANG)
# Activities (i.e. actions) for which an administrative web interface exists.
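# NB: the '%%s' placeholders below survive the first %-interpolation, so the
# language code can be filled in later, e.g. ("%s/kb?ln=%%s" % CFG_SITE_URL) % 'en'
# yields '<CFG_SITE_URL>/kb?ln=en'.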
CFG_ACC_ACTIVITIES_URLS = {
'runbibedit' : (_("Run Record Editor"), "%s/%s/edit/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibeditmulti' : (_("Run Multi-Record Editor"), "%s/%s/multiedit/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibdocfile' : (_("Run Document File Manager"), "%s/%s/managedocfiles?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibmerge' : (_("Run Record Merger"), "%s/%s/merge/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibswordclient' : (_("Run BibSword client"), "%s/bibsword/?ln=%%s" % CFG_SITE_URL),
'cfgbibknowledge' : (_("Configure BibKnowledge"), "%s/kb?ln=%%s" % CFG_SITE_URL),
'cfgbibformat' : (_("Configure BibFormat"), "%s/admin/bibformat/bibformatadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoaiharvest' : (_("Configure OAI Harvest"), "%s/admin/oaiharvest/oaiharvestadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoairepository' : (_("Configure OAI Repository"), "%s/admin/oairepository/oairepositoryadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibindex' : (_("Configure BibIndex"), "%s/admin/bibindex/bibindexadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibrank' : (_("Configure BibRank"), "%s/admin/bibrank/bibrankadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebaccess' : (_("Configure WebAccess"), "%s/admin/webaccess/webaccessadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebcomment' : (_("Configure WebComment"), "%s/admin/webcomment/webcommentadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgweblinkback' : (_("Configure WebLinkback"), "%s/admin/weblinkback/weblinkbackadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsearch' : (_("Configure WebSearch"), "%s/admin/websearch/websearchadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsubmit' : (_("Configure WebSubmit"), "%s/admin/websubmit/websubmitadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebjournal' : (_("Configure WebJournal"), "%s/admin/webjournal/webjournaladmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibsort' : (_("Configure BibSort"), "%s/admin/bibsort/bibsortadmin.py?ln=%%s" % CFG_SITE_URL),
'runbibcirculation' : (_("Run BibCirculation"), "%s/admin/bibcirculation/bibcirculationadmin.py?ln=%%s" % CFG_SITE_URL),
'runbatchuploader' : (_("Run Batch Uploader"), "%s/batchuploader/metadata?ln=%%s" % CFG_SITE_URL),
'claimpaper_claim_others_papers' : (_("Run Person/Author Manager"), "%s/person/search?ln=%%s" % CFG_SITE_URL)
}
CFG_WEBACCESS_MSGS = {
0: 'Try to <a href="%s/youraccount/login?referer=%%s">login</a> with another account.' % (CFG_SITE_SECURE_URL),
1: '<br />If you think this is not correct, please contact: <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
2: '<br />If you have any questions, please write to <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
3: 'Guest users are not allowed, please <a href="%s/youraccount/login">login</a>.' % CFG_SITE_SECURE_URL,
4: 'The site is temporarily closed for maintenance. Please come back soon.',
5: 'Authorization failure',
6: '%s temporarily closed' % CFG_SITE_NAME,
7: 'This functionality is temporarily closed due to server maintenance. Please use only the search engine in the meantime.',
8: 'Functionality temporarily closed'
}
CFG_WEBACCESS_WARNING_MSGS = {
0: 'Authorization granted',
1: 'You are not authorized to perform this action.',
2: 'You are not authorized to perform any action.',
3: 'The action %s does not exist.',
4: 'Unexpected error occurred.',
5: 'Missing mandatory keyword argument(s) for this action.',
6: 'Guest accounts are not authorized to perform this action.',
7: 'Not enough arguments, user ID and action name required.',
8: 'Incorrect keyword argument(s) for this action.',
9: """Account '%s' is not yet activated.""",
10: """You were not authorized by the authentication method '%s'.""",
11: """The selected login method '%s' is not the default method for this account, please try another one.""",
12: """Selected login method '%s' does not exist.""",
13: """Could not register '%s' account.""",
14: """Could not login using '%s', because this user is unknown.""",
15: """Could not login using your '%s' account, because you have entered a wrong password.""",
16: """External authentication troubles using '%s' (maybe temporary network problems).""",
17: """You have not yet confirmed the email address for the '%s' authentication method.""",
18: """The administrator has not yet activated your account for the '%s' authentication method.""",
19: """The site is having trouble sending you an email to confirm your email address. The error has been logged and will be taken care of as soon as possible.""",
20: """No roles are authorized to perform action %s with the given parameters.""",
21: """Verification cancelled""",
22: """Verification failed. Please try again or use another provider to log in.""",
23: """Verification failed, probably because the configuration is not set properly. Please contact the <a href="mailto:%s">administrator</a>.""" % CFG_SITE_ADMIN_EMAIL
}
# There are three status keys that must be here: OK, REMOVED and REVOKED;
# the value doesn't matter at all.
CFG_WEB_API_KEY_STATUS = {
'OK':'OK',
'REMOVED':'REMOVED',
'REVOKED':'REVOKED',
'WARNING':'WARNING'
}<|fim▁end|>
| |
<|file_name|>Root.js<|end_file_name|><|fim▁begin|>import React from 'react'
import { Router, Route, hashHistory, browserHistory, IndexRoute } from 'react-router'<|fim▁hole|>import Register from '../components/hello/Register'
import Index from '../components/index/Index'
import HelloWorld from '../components/hello/HelloWorld'
import Header from '../components/common/Header'
import Xiexie from '../components/write/Write'
import ArticleDetail from '../components/index/ArticleDetail'
class Root extends React.Component {
render() {
return (
<Router history={browserHistory}>
<Route path="/" component={Header}>
<IndexRoute component={Index}/>
<Route path="/xiexie" component={Xiexie}/>
<Route path="/articleDetail" component={ArticleDetail}/>
</Route>
</Router>
)
}
}
export default Root<|fim▁end|>
|
import MainContainer from '../components/MainContainer'
import Login from '../components/hello/Login'
|
<|file_name|>HCenter3.py<|end_file_name|><|fim▁begin|># encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class HCenter3Client:<|fim▁hole|> VCenter(PushButton(Opt("vstretch"), "Button 1")),
VCenter(PushButton(Opt("vstretch"), "Button 2")),
VCenter(PushButton(Opt("vstretch"), "Button 3"))
)
)
UI.UserInput()
UI.CloseDialog()
HCenter3Client().main()<|fim▁end|>
|
def main(self):
UI.OpenDialog(
Opt("defaultsize"),
VBox(
|
<|file_name|>remove-gallery-image.component.ts<|end_file_name|><|fim▁begin|>import { Component, EventEmitter, Input, Output } from '@angular/core';
import { GalleryImageService } from '../../../../shared/services/gallery-image.service';
@Component({
selector: 'respond-remove-gallery-image',
templateUrl: 'remove-gallery-image.component.html',
providers: [GalleryImageService]
})
export class RemoveGalleryImageComponent {
routes;
// model to store
model;
_visible: boolean = false;
// visible input
@Input()
set visible(visible: boolean){
// set visible
this._visible = visible;
}
get visible() { return this._visible; }
// image input
@Input()
set image(image){
// set model
this.model = image;
}<|fim▁hole|> // gallery input
@Input() gallery;
// outputs
@Output() onCancel = new EventEmitter<any>();
@Output() onUpdate = new EventEmitter<any>();
@Output() onError = new EventEmitter<any>();
constructor (private _galleryImageService: GalleryImageService) {}
/**
* Init
*/
ngOnInit() {
this.model = {
id: '',
name: '',
url: '',
caption: ''
};
}
/**
* Hides the modal
*/
hide() {
this._visible = false;
this.onCancel.emit(null);
}
/**
* Submits the gallery image removal
*/
submit() {
this._galleryImageService.remove(this.model.id, this.gallery.id)
.subscribe(
data => { this.success(); },
error => { this.onError.emit(<any>error); }
);
}
/**
* Handles a successful submission
*/
success() {
this._visible = false;
this.onUpdate.emit(null);
}
}<|fim▁end|>
| |
<|file_name|>replace_wrapped.js<|end_file_name|><|fim▁begin|>// Expects to be preceded by javascript that creates a variable called selectRange that
// defines the segment to be wrapped and replaced.
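// For illustration only (not part of the original script): selectRange would
// typically be built from the live DOM selection before this runs, e.g.
//   var selectRange = window.getSelection().getRangeAt(0);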
// create custom range object for wrapSelection
var replaceRange = $.fn.range;
replaceRange.ClearVariables();
replaceRange.startContainer = selectRange.startContainer;
replaceRange.startOffset = selectRange.startOffset;
replaceRange.endContainer = selectRange.endContainer;
replaceRange.endOffset = selectRange.endOffset;
replaceRange.collapsed = selectRange.collapsed;
// Wrap the text to be replaced in a set of custom spans.
// This is done so we can operate on this text even if it extends over different
// inline tags.
var selectMarker = 'SigilReplace_' + new Date().getTime();
$().wrapSelection({fitToWord: false, selectClass: selectMarker, wrapRange: replaceRange});
// First, store the old contents so they can be undone, and then
// insert the new text in the first element of the wrapped range and clear the rest.
$('.'+selectMarker).each(function(n) {
if(n==0){
$(this).data('undo', $(this).html());
$(this).html("$ESCAPED_TEXT_HERE");
}
else {
$(this).data('undo', $(this).html());
// Assign an id so that this element isn't automatically deleted.
$(this).attr("id",selectMarker+n);
$(this).html('');
}
});
// We need to normalize the text nodes since they're screwed up now
selectRange.startContainer.parentNode.normalize();
// Set the cursor to point to the end of the replaced text.
selectRange.collapse( false );
var selection = window.getSelection();
selection.removeAllRanges();
selection.addRange(selectRange);
//Scroll to the cursor
var from_top = window.innerHeight / 2;<|fim▁hole|>
// Return the unique class name used to identify these elements so the change can be undone.
selectMarker.valueOf();<|fim▁end|>
|
$.scrollTo( selectRange.startContainer, 0, {offset: {top:-from_top, left:0 } } );
|
<|file_name|>overloaded-builtin-operators-0x.cpp<|end_file_name|><|fim▁begin|>// RUN: %clang_cc1 -fsyntax-only -fshow-overloads=best -std=c++11 -verify %s
<|fim▁hole|>struct X
{
operator T() const {return T();}
};
void test_char16t(X<char16_t> x) {
bool b = x == char16_t();
}<|fim▁end|>
|
template <class T>
|
<|file_name|>ActiveMQServerLogger.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.core.server;
/**
* Logger Code 22
*
* each message id must be 6 digits long starting with 22, the 3rd digit denotes the level, so
*
* INFO 1
* WARN 2
* DEBUG 3
* ERROR 4
* TRACE 5
* FATAL 6
*
* so an INFO message would be 221000 to 221999
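* e.g. 221005 = code 22 + level 1 (INFO) + sequence 005,
*      222045 = code 22 + level 2 (WARN) + sequence 045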
*/
import javax.transaction.xa.Xid;
import java.io.File;
import java.net.SocketAddress;
import java.net.URI;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import io.netty.channel.Channel;
import org.apache.activemq.artemis.api.core.ActiveMQExceptionType;
import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.core.client.impl.ServerLocatorInternal;
import org.apache.activemq.artemis.core.config.Configuration;
import org.apache.activemq.artemis.core.io.IOCallback;
import org.apache.activemq.artemis.core.io.SequentialFile;
import org.apache.activemq.artemis.core.paging.cursor.PagePosition;
import org.apache.activemq.artemis.core.paging.cursor.PageSubscription;
import org.apache.activemq.artemis.core.persistence.OperationContext;
import org.apache.activemq.artemis.core.protocol.core.Packet;
import org.apache.activemq.artemis.core.protocol.core.impl.wireformat.BackupReplicationStartFailedMessage;
import org.apache.activemq.artemis.core.remoting.impl.netty.TransportConstants;
import org.apache.activemq.artemis.core.server.cluster.Bridge;
import org.apache.activemq.artemis.core.server.cluster.impl.BridgeImpl;
import org.apache.activemq.artemis.core.server.cluster.impl.ClusterConnectionImpl;
import org.apache.activemq.artemis.core.server.impl.ActiveMQServerImpl;
import org.apache.activemq.artemis.core.server.impl.ServerSessionImpl;
import org.apache.activemq.artemis.core.server.management.Notification;
import org.apache.activemq.artemis.utils.FutureLatch;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.Logger;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
import org.w3c.dom.Node;
@MessageLogger(projectCode = "AMQ")
public interface ActiveMQServerLogger extends BasicLogger {
/**
* The default logger.
*/
ActiveMQServerLogger LOGGER = Logger.getMessageLogger(ActiveMQServerLogger.class, ActiveMQServerLogger.class.getPackage().getName());
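// Illustrative usage (not part of the original file): methods on this interface
// log through the generated implementation, e.g.
//   ActiveMQServerLogger.LOGGER.serverIsLive();
//   ActiveMQServerLogger.LOGGER.deployQueue(new SimpleString("orders")); // "orders" is a hypothetical queue name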
@LogMessage(level = Logger.Level.DEBUG)
@Message(id = 223000, value = "Received Interrupt Exception whilst waiting for component to shutdown: {0}", format = Message.Format.MESSAGE_FORMAT)
void interruptWhilstStoppingComponent(String componentClassName);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221000, value = "{0} Message Broker is starting with configuration {1}", format = Message.Format.MESSAGE_FORMAT)
void serverStarting(String type, Configuration configuration);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221001, value = "Apache ActiveMQ Artemis Message Broker version {0} [{1}, nodeID={2}] {3}", format = Message.Format.MESSAGE_FORMAT)
void serverStarted(String fullVersion, String name, SimpleString nodeId, String identity);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221002, value = "Apache ActiveMQ Artemis Message Broker version {0} [{1}] stopped, uptime {2}", format = Message.Format.MESSAGE_FORMAT)
void serverStopped(String version, SimpleString nodeId, String uptime);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221003, value = "Deploying queue {0}", format = Message.Format.MESSAGE_FORMAT)
void deployQueue(SimpleString queueName);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221004, value = "{0}", format = Message.Format.MESSAGE_FORMAT)
void dumpServerInfo(String serverInfo);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221005, value = "Deleting pending large message as it was not completed: {0}",
format = Message.Format.MESSAGE_FORMAT)
void deletingPendingMessage(Pair<Long, Long> msgToDelete);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221006, value = "Waiting to obtain live lock", format = Message.Format.MESSAGE_FORMAT)
void awaitingLiveLock();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221007, value = "Server is now live", format = Message.Format.MESSAGE_FORMAT)
void serverIsLive();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221008, value = "live server wants to restart, restarting server in backup", format = Message.Format.MESSAGE_FORMAT)
void awaitFailBack();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221109, value = "Apache ActiveMQ Artemis Backup Server version {0} [{1}] started, waiting for the live server to fail before it gets active",
format = Message.Format.MESSAGE_FORMAT)
void backupServerStarted(String version, SimpleString nodeID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221010, value = "Backup Server is now live", format = Message.Format.MESSAGE_FORMAT)
void backupServerIsLive();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221011, value = "Server {0} is now live", format = Message.Format.MESSAGE_FORMAT)
void serverIsLive(String identity);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221012, value = "Using AIO Journal", format = Message.Format.MESSAGE_FORMAT)
void journalUseAIO();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221013, value = "Using NIO Journal", format = Message.Format.MESSAGE_FORMAT)
void journalUseNIO();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221014, value = "{0}% loaded", format = Message.Format.MESSAGE_FORMAT)
void percentLoaded(Long percent);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221015, value = "Can not find queue {0} while reloading ACKNOWLEDGE_CURSOR, deleting record now",
format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueueReloading(Long queueID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221016,
value = "Can not find queue {0} while reloading PAGE_CURSOR_COUNTER_VALUE, deleting record now",
format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueueReloadingPage(Long queueID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221017, value = "Can not find queue {0} while reloading PAGE_CURSOR_COUNTER_INC, deleting record now",
format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueueReloadingPageCursor(Long queueID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221018, value = "Large message: {0} did not have any associated reference, file will be deleted",
format = Message.Format.MESSAGE_FORMAT)
void largeMessageWithNoRef(Long messageID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221019, value = "Deleting unreferenced message id={0} from the journal", format = Message.Format.MESSAGE_FORMAT)
void journalUnreferencedMessage(Long messageID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221020, value = "Started {0} Acceptor at {1}:{2,number,#} for protocols [{3}]", format = Message.Format.MESSAGE_FORMAT)
void startedAcceptor(String acceptorType, String host, Integer port, String enabledProtocols);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221021, value = "failed to remove connection", format = Message.Format.MESSAGE_FORMAT)
void errorRemovingConnection();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221022, value = "unable to start connector service: {0}", format = Message.Format.MESSAGE_FORMAT)
void errorStartingConnectorService(@Cause Throwable e, String name);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221023, value = "unable to stop connector service: {0}", format = Message.Format.MESSAGE_FORMAT)
void errorStoppingConnectorService(@Cause Throwable e, String name);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221024, value = "Backup server {0} is synchronized with live-server.", format = Message.Format.MESSAGE_FORMAT)
void backupServerSynched(ActiveMQServerImpl server);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221025, value = "Replication: sending {0} (size={1}) to replica.", format = Message.Format.MESSAGE_FORMAT)
void replicaSyncFile(SequentialFile jf, Long size);
@LogMessage(level = Logger.Level.INFO)
@Message(
id = 221026,
value = "Bridge {0} connected to forwardingAddress={1}. {2} does not have any bindings. Messages will be ignored until a binding is created.",
format = Message.Format.MESSAGE_FORMAT)
void bridgeNoBindings(SimpleString name, SimpleString forwardingAddress, SimpleString address);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221027, value = "Bridge {0} is connected", format = Message.Format.MESSAGE_FORMAT)
void bridgeConnected(BridgeImpl name);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221028, value = "Bridge is stopping, will not retry", format = Message.Format.MESSAGE_FORMAT)
void bridgeStopping();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221029, value = "stopped bridge {0}", format = Message.Format.MESSAGE_FORMAT)
void bridgeStopped(SimpleString name);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221030, value = "paused bridge {0}", format = Message.Format.MESSAGE_FORMAT)
void bridgePaused(SimpleString name);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221031, value = "backup announced", format = Message.Format.MESSAGE_FORMAT)
void backupAnnounced();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221032, value = "Waiting to become backup node", format = Message.Format.MESSAGE_FORMAT)
void waitingToBecomeBackup();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221033, value = "** got backup lock", format = Message.Format.MESSAGE_FORMAT)
void gotBackupLock();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221034, value = "Waiting {0} to obtain live lock", format = Message.Format.MESSAGE_FORMAT)
void waitingToObtainLiveLock(String timeoutMessage);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221035, value = "Live Server Obtained live lock", format = Message.Format.MESSAGE_FORMAT)
void obtainedLiveLock();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221036, value = "Message with duplicate ID {0} was already set at {1}. Move from {2} being ignored and message removed from {3}",
format = Message.Format.MESSAGE_FORMAT)
void messageWithDuplicateID(Object duplicateProperty,
SimpleString toAddress,
SimpleString address,
SimpleString simpleString);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221037, value = "{0} to become ''live''", format = Message.Format.MESSAGE_FORMAT)
void becomingLive(ActiveMQServer server);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221038, value = "Configuration option ''{0}'' is deprecated. Consult the manual for details.",
format = Message.Format.MESSAGE_FORMAT)
void deprecatedConfigurationOption(String deprecatedOption);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221039, value = "Restarting as Replicating backup server after live restart",
format = Message.Format.MESSAGE_FORMAT)
void restartingReplicatedBackupAfterFailback();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221040, value = "Remote group coordinators has not started.", format = Message.Format.MESSAGE_FORMAT)
void remoteGroupCoordinatorsNotStarted();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221041, value = "Cannot find queue {0} while reloading PAGE_CURSOR_COMPLETE, deleting record now",
format = Message.Format.MESSAGE_FORMAT)
void cantFindQueueOnPageComplete(long queueID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221042,
value = "Bridge {0} timed out waiting for the completion of {1} messages, we will just shutdown the bridge after 10 seconds wait",
format = Message.Format.MESSAGE_FORMAT)
void timedOutWaitingCompletions(String bridgeName, long numberOfMessages);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221043, value = "Protocol module found: [{1}]. Adding protocol support for: {0}", format = Message.Format.MESSAGE_FORMAT)
void addingProtocolSupport(String protocolKey, String moduleName);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221045, value = "libaio is not available, switching the configuration into NIO", format = Message.Format.MESSAGE_FORMAT)
void switchingNIO();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221046, value = "Unblocking message production on address ''{0}''; size is currently: {1} bytes; max-size-bytes: {2}", format = Message.Format.MESSAGE_FORMAT)
void unblockingMessageProduction(SimpleString addressName, long currentSize, long maxSize);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221047, value = "Backup Server has scaled down to live server", format = Message.Format.MESSAGE_FORMAT)
void backupServerScaledDown();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221048, value = "Consumer {0}:{1} attached to queue ''{2}'' from {3} identified as ''slow.'' Expected consumption rate: {4} msgs/second; actual consumption rate: {5} msgs/second.", format = Message.Format.MESSAGE_FORMAT)
void slowConsumerDetected(String sessionID,
long consumerID,
String queueName,
String remoteAddress,
float slowConsumerThreshold,
float consumerRate);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221049, value = "Activating Replica for node: {0}", format = Message.Format.MESSAGE_FORMAT)
void activatingReplica(SimpleString nodeID);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221050, value = "Activating Shared Store Slave", format = Message.Format.MESSAGE_FORMAT)
void activatingSharedStoreSlave();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221051, value = "Populating security roles from LDAP at: {0}", format = Message.Format.MESSAGE_FORMAT)
void populatingSecurityRolesFromLDAP(String url);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221052, value = "Deploying topic {0}", format = Message.Format.MESSAGE_FORMAT)
void deployTopic(SimpleString topicName);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221053,
value = "Disallowing use of vulnerable protocol ''{0}'' on acceptor ''{1}''. See http://www.oracle.com/technetwork/topics/security/poodlecve-2014-3566-2339408.html for more details.",
format = Message.Format.MESSAGE_FORMAT)
void disallowedProtocol(String protocol, String acceptorName);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221054, value = "libaio was found but the filesystem does not support AIO. Switching the configuration into NIO. Journal path: {0}", format = Message.Format.MESSAGE_FORMAT)
void switchingNIOonPath(String journalPath);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221055, value = "There were too many old replicated folders upon startup, removing {0}",
format = Message.Format.MESSAGE_FORMAT)
void removingBackupData(String path);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221056, value = "Reloading configuration ...{0}",
format = Message.Format.MESSAGE_FORMAT)
void reloadingConfiguration(String module);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221057, value = "Global Max Size is being adjusted to 1/2 of the JVM max size (-Xmx), being defined as {0}",
format = Message.Format.MESSAGE_FORMAT)
void usingDefaultPaging(long bytes);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221058, value = "resetting Journal File size from {0} to {1} to fit with alignment of {2}", format = Message.Format.MESSAGE_FORMAT)
void invalidJournalFileSize(int journalFileSize, int fileSize, int alignment);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221059, value = "Deleting old data directory {0} as the max folders is set to 0", format = Message.Format.MESSAGE_FORMAT)
void backupDeletingData(String oldPath);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221060, value = "Sending quorum vote request to {0}: {1}", format = Message.Format.MESSAGE_FORMAT)
void sendingQuorumVoteRequest(String remoteAddress, String vote);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221061, value = "Received quorum vote response from {0}: {1}", format = Message.Format.MESSAGE_FORMAT)
void receivedQuorumVoteResponse(String remoteAddress, String vote);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221062, value = "Received quorum vote request: {0}", format = Message.Format.MESSAGE_FORMAT)
void receivedQuorumVoteRequest(String vote);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221063, value = "Sending quorum vote response: {0}", format = Message.Format.MESSAGE_FORMAT)
void sendingQuorumVoteResponse(String vote);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221064, value = "Node {0} found in cluster topology", format = Message.Format.MESSAGE_FORMAT)
void nodeFoundInClusterTopology(String nodeId);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221065, value = "Node {0} not found in cluster topology", format = Message.Format.MESSAGE_FORMAT)
void nodeNotFoundInClusterTopology(String nodeId);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221066, value = "Initiating quorum vote: {0}", format = Message.Format.MESSAGE_FORMAT)
void initiatingQuorumVote(SimpleString vote);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221067, value = "Waiting {0} {1} for quorum vote results.", format = Message.Format.MESSAGE_FORMAT)
void waitingForQuorumVoteResults(int timeout, String unit);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221068, value = "Received all quorum votes.", format = Message.Format.MESSAGE_FORMAT)
void receivedAllQuorumVotes();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221069, value = "Timeout waiting for quorum vote responses.", format = Message.Format.MESSAGE_FORMAT)
void timeoutWaitingForQuorumVoteResponses();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221070, value = "Restarting as backup based on quorum vote results.", format = Message.Format.MESSAGE_FORMAT)
void restartingAsBackupBasedOnQuorumVoteResults();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 221071, value = "Failing over based on quorum vote results.", format = Message.Format.MESSAGE_FORMAT)
void failingOverBasedOnQuorumVoteResults();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222000, value = "ActiveMQServer is being finalized and has not been stopped. Please remember to stop the server before letting it go out of scope",
format = Message.Format.MESSAGE_FORMAT)
void serverFinalisedWIthoutBeingSTopped();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222001, value = "Error closing sessions while stopping server", format = Message.Format.MESSAGE_FORMAT)
void errorClosingSessionsWhileStoppingServer(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222002, value = "Timed out waiting for pool to terminate {0}. Interrupting all its threads!", format = Message.Format.MESSAGE_FORMAT)
void timedOutStoppingThreadpool(ExecutorService service);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222004, value = "Must specify an address for each divert. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void divertWithNoAddress();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222005, value = "Must specify a forwarding address for each divert. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void divertWithNoForwardingAddress();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222006, value = "Binding already exists with name {0}, divert will not be deployed", format = Message.Format.MESSAGE_FORMAT)
void divertBindingAlreadyExists(SimpleString bindingName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222007, value = "Security risk! Apache ActiveMQ Artemis is running with the default cluster admin user and default password. Please see the cluster chapter in the ActiveMQ Artemis User Guide for instructions on how to change this.", format = Message.Format.MESSAGE_FORMAT)
void clusterSecurityRisk();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222008, value = "unable to restart server, please kill and restart manually", format = Message.Format.MESSAGE_FORMAT)
void serverRestartWarning();
@LogMessage(level = Logger.Level.WARN)
void serverRestartWarning(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222009, value = "Unable to announce backup for replication. Trying to stop the server.", format = Message.Format.MESSAGE_FORMAT)
void replicationStartProblem(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222010, value = "Critical IO Error, shutting down the server. file={1}, message={0}", format = Message.Format.MESSAGE_FORMAT)
void ioCriticalIOError(String message, String file, @Cause Throwable code);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222011, value = "Error stopping server", format = Message.Format.MESSAGE_FORMAT)
void errorStoppingServer(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222012, value = "Timed out waiting for backup activation to exit", format = Message.Format.MESSAGE_FORMAT)
void backupActivationProblem();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222013, value = "Error when trying to start replication", format = Message.Format.MESSAGE_FORMAT)
void errorStartingReplication(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222014, value = "Error when trying to stop replication", format = Message.Format.MESSAGE_FORMAT)
void errorStoppingReplication(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222016, value = "Cannot deploy a connector with no name specified.", format = Message.Format.MESSAGE_FORMAT)
void connectorWithNoName();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222017, value = "There is already a connector with name {0} deployed. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void connectorAlreadyDeployed(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(
id = 222018,
value = "AIO was not located on this platform, it will fall back to using pure Java NIO. If your platform is Linux, install LibAIO to enable the AIO journal",
format = Message.Format.MESSAGE_FORMAT)
void AIONotFound();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222019, value = "There is already a discovery group with name {0} deployed. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void discoveryGroupAlreadyDeployed(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222020, value = "error scanning for URLs", format = Message.Format.MESSAGE_FORMAT)
void errorScanningURLs(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222021, value = "problem undeploying {0}", format = Message.Format.MESSAGE_FORMAT)
void problemUndeployingNode(@Cause Exception e, Node node);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222022, value = "Timed out waiting for paging cursor to stop {0} {1}", format = Message.Format.MESSAGE_FORMAT)
void timedOutStoppingPagingCursor(FutureLatch future, Executor executor);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222023, value = "problem cleaning page address {0}", format = Message.Format.MESSAGE_FORMAT)
void problemCleaningPageAddress(@Cause Exception e, SimpleString address);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222024, value = "Could not complete operations on IO context {0}",
format = Message.Format.MESSAGE_FORMAT)
void problemCompletingOperations(OperationContext e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222025, value = "Problem cleaning page subscription counter", format = Message.Format.MESSAGE_FORMAT)
void problemCleaningPagesubscriptionCounter(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222026, value = "Error on cleaning up cursor pages", format = Message.Format.MESSAGE_FORMAT)
void problemCleaningCursorPages(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222027, value = "Timed out flushing executors for paging cursor to stop {0}", format = Message.Format.MESSAGE_FORMAT)
void timedOutFlushingExecutorsPagingCursor(PageSubscription pageSubscription);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222028, value = "Could not find page cache for page {0} removing it from the journal",
format = Message.Format.MESSAGE_FORMAT)
void pageNotFound(PagePosition pos);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222029,
value = "Could not locate page transaction {0}, ignoring message on position {1} on address={2} queue={3}",
format = Message.Format.MESSAGE_FORMAT)
void pageSubscriptionCouldntLoad(long transactionID, PagePosition position, SimpleString address, SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222030, value = "File {0} being renamed to {1}.invalidPage as it was loaded partially. Please verify your data.", format = Message.Format.MESSAGE_FORMAT)
void pageInvalid(String fileName, String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222031, value = "Error while deleting page file", format = Message.Format.MESSAGE_FORMAT)
void pageDeleteError(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222032, value = "page finalise error", format = Message.Format.MESSAGE_FORMAT)
void pageFinaliseError(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222033, value = "Page file {0} had incomplete records at position {1} at record number {2}", format = Message.Format.MESSAGE_FORMAT)
void pageSuspectFile(String fileName, int position, int msgNumber);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222034, value = "Can not delete page transaction id={0}", format = Message.Format.MESSAGE_FORMAT)
void pageTxDeleteError(@Cause Exception e, long recordID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222035, value = "Directory {0} did not have an identification file {1}",
format = Message.Format.MESSAGE_FORMAT)
void pageStoreFactoryNoIdFile(String s, String addressFile);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222036, value = "Timed out on waiting PagingStore {0} to shutdown", format = Message.Format.MESSAGE_FORMAT)
void pageStoreTimeout(SimpleString address);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222037, value = "IO Error, impossible to start paging", format = Message.Format.MESSAGE_FORMAT)
void pageStoreStartIOError(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222038, value = "Starting paging on address ''{0}''; size is currently: {1} bytes; max-size-bytes: {2}", format = Message.Format.MESSAGE_FORMAT)
void pageStoreStart(SimpleString storeName, long addressSize, long maxSize);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222039, value = "Messages sent to address ''{0}'' are being dropped; size is currently: {1} bytes; max-size-bytes: {2}", format = Message.Format.MESSAGE_FORMAT)
void pageStoreDropMessages(SimpleString storeName, long addressSize, long maxSize);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222040, value = "Server is stopped", format = Message.Format.MESSAGE_FORMAT)
void serverIsStopped();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222041, value = "Cannot find queue {0} to update delivery count", format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueueDelCount(Long queueID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222042, value = "Cannot find message {0} to update delivery count", format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindMessageDelCount(Long msg);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222043, value = "Message for queue {0} which does not exist. This message will be ignored.", format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueueForMessage(Long queueID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222044, value = "It was not possible to delete message {0}", format = Message.Format.MESSAGE_FORMAT)
void journalErrorDeletingMessage(@Cause Exception e, Long messageID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222045, value = "Message in prepared tx for queue {0} which does not exist. This message will be ignored.", format = Message.Format.MESSAGE_FORMAT)
void journalMessageInPreparedTX(Long queueID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222046, value = "Failed to remove reference for {0}", format = Message.Format.MESSAGE_FORMAT)
void journalErrorRemovingRef(Long messageID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222047, value = "Can not find queue {0} while reloading ACKNOWLEDGE_CURSOR",
format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueueReloadingACK(Long queueID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222048, value = "PAGE_CURSOR_COUNTER_VALUE record used on a prepared statement, invalid state", format = Message.Format.MESSAGE_FORMAT)
void journalPAGEOnPrepared();
@LogMessage(level = Logger.Level.WARN)
@Message(
id = 222049,
value = "InternalError: Record type {0} not recognized. Maybe you are using journal files created on a different version",
format = Message.Format.MESSAGE_FORMAT)
void journalInvalidRecordType(Byte recordType);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222050, value = "Can not locate recordType={0} on loadPreparedTransaction//deleteRecords",
format = Message.Format.MESSAGE_FORMAT)
void journalInvalidRecordTypeOnPreparedTX(Byte recordType);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222051, value = "Journal Error", format = Message.Format.MESSAGE_FORMAT)
void journalError(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222052, value = "error incrementing delay deletion count", format = Message.Format.MESSAGE_FORMAT)
void errorIncrementDelayDeletionCount(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222053, value = "Error on copying large message {0} for DLA or Expiry", format = Message.Format.MESSAGE_FORMAT)
void lareMessageErrorCopying(@Cause Exception e, LargeServerMessage largeServerMessage);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222054, value = "Error on executing IOCallback", format = Message.Format.MESSAGE_FORMAT)
void errorExecutingAIOCallback(@Cause Throwable t);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222055, value = "Error on deleting duplicate cache", format = Message.Format.MESSAGE_FORMAT)
void errorDeletingDuplicateCache(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222056, value = "Did not route to any bindings for address {0} and sendToDLAOnNoRoute is true but there is no DLA configured for the address, the message will be ignored.",
format = Message.Format.MESSAGE_FORMAT)
void noDLA(SimpleString address);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222057, value = "It was not possible to add references due to an IO error code {0} message = {1}",
format = Message.Format.MESSAGE_FORMAT)
void ioErrorAddingReferences(Integer errorCode, String errorMessage);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222059, value = "Duplicate message detected - message will not be routed. Message information:\n{0}", format = Message.Format.MESSAGE_FORMAT)
void duplicateMessageDetected(org.apache.activemq.artemis.api.core.Message message);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222060, value = "Error while confirming large message completion on rollback for recordID={0}", format = Message.Format.MESSAGE_FORMAT)
void journalErrorConfirmingLargeMessage(@Cause Throwable e, Long messageID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222061, value = "Client connection failed, clearing up resources for session {0}", format = Message.Format.MESSAGE_FORMAT)
void clientConnectionFailed(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222062, value = "Cleared up resources for session {0}", format = Message.Format.MESSAGE_FORMAT)
void clearingUpSession(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222063, value = "Error processing IOCallback code = {0} message = {1}", format = Message.Format.MESSAGE_FORMAT)
void errorProcessingIOCallback(Integer errorCode, String errorMessage);
@LogMessage(level = Logger.Level.DEBUG)
@Message(id = 222065, value = "Client is not being consistent on the request versioning. It just sent a version id={0} while it informed {1} previously", format = Message.Format.MESSAGE_FORMAT)
void incompatibleVersionAfterConnect(int version, int clientVersion);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222066, value = "Reattach request from {0} failed as there is no confirmationWindowSize configured, which may be ok for your system", format = Message.Format.MESSAGE_FORMAT)
void reattachRequestFailed(String remoteAddress);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222067, value = "Connection failure has been detected: {0} [code={1}]", format = Message.Format.MESSAGE_FORMAT)
void connectionFailureDetected(String message, ActiveMQExceptionType type);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222069, value = "error cleaning up stomp connection", format = Message.Format.MESSAGE_FORMAT)
void errorCleaningStompConn(@Cause Exception e);
<|fim▁hole|> @Message(id = 222070, value = "Stomp Transactional acknowledgement is not supported", format = Message.Format.MESSAGE_FORMAT)
void stompTXAckNorSupported();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222071, value = "Interrupted while waiting for stomp heartbeat to die", format = Message.Format.MESSAGE_FORMAT)
void errorOnStompHeartBeat(@Cause InterruptedException e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222072, value = "Timed out flushing channel on InVMConnection", format = Message.Format.MESSAGE_FORMAT)
void timedOutFlushingInvmChannel();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222074, value = "channel group did not completely close", format = Message.Format.MESSAGE_FORMAT)
void nettyChannelGroupError();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222075, value = "{0} is still connected to {1}", format = Message.Format.MESSAGE_FORMAT)
void nettyChannelStillOpen(Channel channel, SocketAddress remoteAddress);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222076, value = "channel group did not completely unbind", format = Message.Format.MESSAGE_FORMAT)
void nettyChannelGroupBindError();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222077, value = "{0} is still bound to {1}", format = Message.Format.MESSAGE_FORMAT)
void nettyChannelStillBound(Channel channel, SocketAddress remoteAddress);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222078, value = "Error instantiating remoting interceptor {0}", format = Message.Format.MESSAGE_FORMAT)
void errorCreatingRemotingInterceptor(@Cause Exception e, String interceptorClass);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222079, value = "The following keys are invalid for configuring the acceptor: {0} the acceptor will not be started.",
format = Message.Format.MESSAGE_FORMAT)
void invalidAcceptorKeys(String s);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222080, value = "Error instantiating remoting acceptor {0}", format = Message.Format.MESSAGE_FORMAT)
void errorCreatingAcceptor(@Cause Exception e, String factoryClassName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222081, value = "Timed out waiting for remoting thread pool to terminate", format = Message.Format.MESSAGE_FORMAT)
void timeoutRemotingThreadPool();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222082, value = "error on connection failure check", format = Message.Format.MESSAGE_FORMAT)
void errorOnFailureCheck(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222083, value = "The following keys are invalid for configuring the connector service: {0} the connector will not be started.",
format = Message.Format.MESSAGE_FORMAT)
void connectorKeysInvalid(String s);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222084, value = "The following keys are required for configuring the connector service: {0} the connector will not be started.",
format = Message.Format.MESSAGE_FORMAT)
void connectorKeysMissing(String s);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222085, value = "Packet {0} can not be processed by the ReplicationEndpoint",
format = Message.Format.MESSAGE_FORMAT)
void invalidPacketForReplication(Packet packet);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222086, value = "error handling packet {0} for replication", format = Message.Format.MESSAGE_FORMAT)
void errorHandlingReplicationPacket(@Cause Exception e, Packet packet);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222087, value = "Replication Error while closing the page on backup", format = Message.Format.MESSAGE_FORMAT)
void errorClosingPageOnReplication(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222088, value = "Journal comparison mismatch:\n{0}", format = Message.Format.MESSAGE_FORMAT)
void journalcomparisonMismatch(String s);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222089, value = "Replication Error deleting large message ID = {0}", format = Message.Format.MESSAGE_FORMAT)
void errorDeletingLargeMessage(@Cause Exception e, long messageId);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222090, value = "Replication Large MessageID {0} is not available on backup server. Ignoring replication message", format = Message.Format.MESSAGE_FORMAT)
void largeMessageNotAvailable(long messageId);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222091, value = "The backup node has been shut-down, replication will now stop", format = Message.Format.MESSAGE_FORMAT)
void replicationStopOnBackupShutdown();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222092, value = "Connection to the backup node failed, removing replication now", format = Message.Format.MESSAGE_FORMAT)
void replicationStopOnBackupFail(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222093, value = "Timed out waiting to stop Bridge", format = Message.Format.MESSAGE_FORMAT)
void timedOutWaitingToStopBridge();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222094, value = "Bridge unable to send message {0}, will try again once bridge reconnects", format = Message.Format.MESSAGE_FORMAT)
void bridgeUnableToSendMessage(@Cause Exception e, MessageReference ref);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222095, value = "Connection failed with failedOver={0}", format = Message.Format.MESSAGE_FORMAT)
void bridgeConnectionFailed(Boolean failedOver);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222096, value = "Error on querying binding on bridge {0}. Retrying in 100 milliseconds", format = Message.Format.MESSAGE_FORMAT)
void errorQueryingBridge(@Cause Throwable t, SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222097, value = "Address {0} does not have any bindings, retry #({1})",
format = Message.Format.MESSAGE_FORMAT)
void errorQueryingBridge(SimpleString address, Integer retryCount);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222098, value = "Server is starting, retry to create the session for bridge {0}", format = Message.Format.MESSAGE_FORMAT)
void errorStartingBridge(SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222099, value = "Bridge {0} is unable to connect to destination. It will be disabled.", format = Message.Format.MESSAGE_FORMAT)
void errorConnectingBridge(@Cause Exception e, Bridge bridge);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222100, value = "ServerLocator was shutdown, can not retry on opening connection for bridge",
format = Message.Format.MESSAGE_FORMAT)
void bridgeLocatorShutdown();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222101, value = "Bridge {0} achieved {1} maxattempts={2}; it will stop retrying to reconnect", format = Message.Format.MESSAGE_FORMAT)
void bridgeAbortStart(SimpleString name, Integer retryCount, Integer reconnectAttempts);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222102, value = "Unexpected exception while trying to reconnect", format = Message.Format.MESSAGE_FORMAT)
void errorReConnecting(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222103, value = "transaction with xid {0} timed out", format = Message.Format.MESSAGE_FORMAT)
void timedOutXID(Xid xid);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222104, value = "IO Error completing the transaction, code = {0}, message = {1}", format = Message.Format.MESSAGE_FORMAT)
void ioErrorOnTX(Integer errorCode, String errorMessage);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222105, value = "Could not finish context execution in 10 seconds",
format = Message.Format.MESSAGE_FORMAT)
void errorCompletingContext(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222106, value = "Replacing incomplete LargeMessage with ID={0}", format = Message.Format.MESSAGE_FORMAT)
void replacingIncompleteLargeMessage(Long messageID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222107, value = "Cleared up resources for session {0}", format = Message.Format.MESSAGE_FORMAT)
void clientConnectionFailedClearingSession(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222108, value = "unable to send notification when broadcast group is stopped",
format = Message.Format.MESSAGE_FORMAT)
void broadcastGroupClosed(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222109, value = "Timed out waiting for write lock on consumer. Check the Thread dump", format = Message.Format.MESSAGE_FORMAT)
void timeoutLockingConsumer();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222110, value = "no queue IDs defined! originalMessage = {0}, copiedMessage = {1}, props={2}",
format = Message.Format.MESSAGE_FORMAT)
void noQueueIdDefined(org.apache.activemq.artemis.api.core.Message message, org.apache.activemq.artemis.api.core.Message messageCopy, SimpleString idsHeaderName);
@LogMessage(level = Logger.Level.TRACE)
@Message(id = 222111, value = "exception while invoking {0} on {1}",
format = Message.Format.MESSAGE_FORMAT)
void managementOperationError(@Cause Exception e, String op, String resourceName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222112, value = "exception while retrieving attribute {0} on {1}",
format = Message.Format.MESSAGE_FORMAT)
void managementAttributeError(@Cause Exception e, String att, String resourceName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222113, value = "On ManagementService stop, there are {0} unexpected registered MBeans: {1}",
format = Message.Format.MESSAGE_FORMAT)
void managementStopError(Integer size, List<String> unexpectedResourceNames);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222114, value = "Unable to delete group binding info {0}",
format = Message.Format.MESSAGE_FORMAT)
void unableToDeleteGroupBindings(@Cause Exception e, SimpleString groupId);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222115, value = "Error closing serverLocator={0}",
format = Message.Format.MESSAGE_FORMAT)
void errorClosingServerLocator(@Cause Exception e, ServerLocatorInternal clusterLocator);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222116, value = "unable to start broadcast group {0}", format = Message.Format.MESSAGE_FORMAT)
void unableToStartBroadcastGroup(@Cause Exception e, String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222117, value = "unable to start cluster connection {0}", format = Message.Format.MESSAGE_FORMAT)
void unableToStartClusterConnection(@Cause Exception e, SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222118, value = "unable to start Bridge {0}", format = Message.Format.MESSAGE_FORMAT)
void unableToStartBridge(@Cause Exception e, SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222119, value = "No connector with name {0}. backup cannot be announced.",
format = Message.Format.MESSAGE_FORMAT)
void announceBackupNoConnector(String connectorName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222120, value = "no cluster connections defined, unable to announce backup", format = Message.Format.MESSAGE_FORMAT)
void announceBackupNoClusterConnections();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222121, value = "Must specify a unique name for each bridge. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void bridgeNotUnique();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222122, value = "Must specify a queue name for each bridge. This one {0} will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void bridgeNoQueue(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222123, value = "Forward address is not specified on bridge {0}. Will use original message address instead", format = Message.Format.MESSAGE_FORMAT)
void bridgeNoForwardAddress(String bridgeName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222124, value = "There is already a bridge with name {0} deployed. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void bridgeAlreadyDeployed(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222125, value = "No queue found with name {0} bridge {1} will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void bridgeQueueNotFound(String queueName, String bridgeName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222126, value = "No discovery group found with name {0} bridge will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void bridgeNoDiscoveryGroup(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222127, value = "Must specify a unique name for each cluster connection. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void clusterConnectionNotUnique();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222128, value = "Must specify an address for each cluster connection. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void clusterConnectionNoForwardAddress();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222129, value = "No connector with name {0}. The cluster connection will not be deployed.",
format = Message.Format.MESSAGE_FORMAT)
void clusterConnectionNoConnector(String connectorName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222130,
value = "Cluster Configuration {0} already exists. The cluster connection will not be deployed.",
format = Message.Format.MESSAGE_FORMAT)
void clusterConnectionAlreadyExists(String connectorName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222131, value = "No discovery group with name {0}. The cluster connection will not be deployed.",
format = Message.Format.MESSAGE_FORMAT)
void clusterConnectionNoDiscoveryGroup(String discoveryGroupName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222132, value = "There is already a broadcast-group with name {0} deployed. This one will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void broadcastGroupAlreadyExists(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(
id = 222133,
value = "There is no connector deployed with name {0}. The broadcast group with name {1} will not be deployed.",
format = Message.Format.MESSAGE_FORMAT)
void broadcastGroupNoConnector(String connectorName, String bgName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222134, value = "No connector defined with name {0}. The bridge will not be deployed.",
format = Message.Format.MESSAGE_FORMAT)
void noConnector(String name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222135, value = "Stopping Redistributor; timed out waiting for tasks to complete", format = Message.Format.MESSAGE_FORMAT)
void errorStoppingRedistributor();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222136, value = "IO Error during redistribution, errorCode = {0} message = {1}", format = Message.Format.MESSAGE_FORMAT)
void ioErrorRedistributing(Integer errorCode, String errorMessage);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222137, value = "Unable to announce backup, retrying", format = Message.Format.MESSAGE_FORMAT)
void errorAnnouncingBackup(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222138, value = "Local Member is not set on ClusterConnection {0}", format = Message.Format.MESSAGE_FORMAT)
void noLocalMemborOnClusterConnection(ClusterConnectionImpl clusterConnection);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222139, value = "{0}::Remote queue binding {1} has already been bound in the post office. Most likely cause for this is you have a loop in your cluster due to cluster max-hops being too large or you have multiple cluster connections to the same nodes using overlapping addresses",
format = Message.Format.MESSAGE_FORMAT)
void remoteQueueAlreadyBoundOnClusterConnection(Object messageFlowRecord, SimpleString clusterName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222141, value = "Node Manager can not open file {0}", format = Message.Format.MESSAGE_FORMAT)
void nodeManagerCantOpenFile(@Cause Exception e, File file);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222142, value = "Error on resetting large message deliverer - {0}", format = Message.Format.MESSAGE_FORMAT)
void errorResttingLargeMessage(@Cause Throwable e, Object deliverer);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222143, value = "Timed out waiting for executor to complete", format = Message.Format.MESSAGE_FORMAT)
void errorTransferringConsumer();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222144, value = "Queue could not finish waiting executors. Try increasing the thread pool size",
format = Message.Format.MESSAGE_FORMAT)
void errorFlushingExecutorsOnQueue();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222145, value = "Error expiring reference {0} on queue", format = Message.Format.MESSAGE_FORMAT)
void errorExpiringReferencesOnQueue(@Cause Exception e, MessageReference ref);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222146, value = "Message has expired. No bindings for Expiry Address {0} so dropping it", format = Message.Format.MESSAGE_FORMAT)
void errorExpiringReferencesNoBindings(SimpleString expiryAddress);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222147, value = "Messages are being expired on queue {0}. However, there is no expiry queue configured, hence messages will be dropped.", format = Message.Format.MESSAGE_FORMAT)
void errorExpiringReferencesNoQueue(SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222148, value = "Message {0} has exceeded max delivery attempts. No bindings for Dead Letter Address {1} so dropping it",
format = Message.Format.MESSAGE_FORMAT)
void messageExceededMaxDelivery(MessageReference ref, SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222149, value = "Message {0} has reached maximum delivery attempts, sending it to Dead Letter Address {1} from {2}",
format = Message.Format.MESSAGE_FORMAT)
void messageExceededMaxDeliverySendtoDLA(MessageReference ref, SimpleString name, SimpleString simpleString);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222150, value = "Message {0} has exceeded max delivery attempts. No Dead Letter Address configured for queue {1} so dropping it",
format = Message.Format.MESSAGE_FORMAT)
void messageExceededMaxDeliveryNoDLA(MessageReference ref, SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222151, value = "removing consumer which did not handle a message, consumer={0}, message={1}",
format = Message.Format.MESSAGE_FORMAT)
void removingBadConsumer(@Cause Throwable e, Consumer consumer, MessageReference reference);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222152, value = "Unable to decrement reference counting on queue",
format = Message.Format.MESSAGE_FORMAT)
void errorDecrementingRefCount(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222153, value = "Unable to remove message id = {0} please remove manually",
format = Message.Format.MESSAGE_FORMAT)
void errorRemovingMessage(@Cause Throwable e, Long messageID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222154, value = "Error checking DLQ",
format = Message.Format.MESSAGE_FORMAT)
void errorCheckingDLQ(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222155, value = "Failed to register as backup. Stopping the server.",
format = Message.Format.MESSAGE_FORMAT)
void errorRegisteringBackup();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222156, value = "Less than {0}%\n{1}\nYou are in danger of running out of RAM. Have you set paging parameters on your addresses? (See user manual \"Paging\" chapter)",
format = Message.Format.MESSAGE_FORMAT)
void memoryError(Integer memoryWarningThreshold, String info);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222157, value = "Error completing callback on replication manager",
format = Message.Format.MESSAGE_FORMAT)
void errorCompletingCallbackOnReplicationManager(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222158, value = "{0} activation thread did not finish.", format = Message.Format.MESSAGE_FORMAT)
void activationDidntFinish(ActiveMQServer server);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222159, value = "unable to send notification when broadcast group is stopped", format = Message.Format.MESSAGE_FORMAT)
void broadcastBridgeStoppedError(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222160, value = "unable to send notification when notification bridge is stopped", format = Message.Format.MESSAGE_FORMAT)
void notificationBridgeStoppedError(@Cause Exception e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222161, value = "Group Handler timed-out waiting for sendCondition", format = Message.Format.MESSAGE_FORMAT)
void groupHandlerSendTimeout();
@LogMessage(level = Logger.Level.INFO)
@Message(id = 222162, value = "Moving data directory {0} to {1}", format = Message.Format.MESSAGE_FORMAT)
void backupMovingDataAway(String oldPath, String newPath);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222163, value = "Server is being completely stopped, since this was a replicated backup there may be journal files that need cleaning up. The Apache ActiveMQ Artemis broker will have to be manually restarted.",
format = Message.Format.MESSAGE_FORMAT)
void stopReplicatedBackupAfterFailback();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222164, value = "Error when trying to start replication {0}", format = Message.Format.MESSAGE_FORMAT)
void errorStartingReplication(BackupReplicationStartFailedMessage.BackupRegistrationProblem problem);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222165, value = "No Dead Letter Address configured for queue {0} in AddressSettings",
format = Message.Format.MESSAGE_FORMAT)
void AddressSettingsNoDLA(SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222166, value = "No Expiry Address configured for queue {0} in AddressSettings",
format = Message.Format.MESSAGE_FORMAT)
void AddressSettingsNoExpiryAddress(SimpleString name);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222167, value = "Group Binding not available so deleting {0} groups from {1}, groups will be bound to another node",
format = Message.Format.MESSAGE_FORMAT)
void groupingQueueRemoved(int size, SimpleString clusterName);
@SuppressWarnings("deprecation")
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222168, value = "The ''" + TransportConstants.PROTOCOL_PROP_NAME + "'' property is deprecated. If you want this Acceptor to support multiple protocols, use the ''" + TransportConstants.PROTOCOLS_PROP_NAME + "'' property, e.g. with value ''CORE,AMQP,STOMP''",
format = Message.Format.MESSAGE_FORMAT)
void warnDeprecatedProtocol();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222169, value = "You have old legacy clients connected to the queue {0} and we can''t disconnect them; these clients may just hang",
format = Message.Format.MESSAGE_FORMAT)
void warnDisconnectOldClient(String queueName);
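// Note (descriptive, not from the original source): with Message.Format.MESSAGE_FORMAT
// these values are java.text.MessageFormat patterns, so {0}, {1}, ... are positional
// arguments and a doubled apostrophe ('') renders as a single literal quote --
// e.g. "can''t" above is logged as "can't".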
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222170, value = "Bridge {0} forwarding address {1} has confirmation-window-size ({2}) greater than address'' max-size-bytes'' ({3})",
format = Message.Format.MESSAGE_FORMAT)
void bridgeConfirmationWindowTooSmall(String bridgeName, String address, int windowConfirmation, long maxSizeBytes);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222171, value = "Bridge {0} forwarding address {1} could not be resolved on address-settings configuration",
format = Message.Format.MESSAGE_FORMAT)
void bridgeCantFindAddressConfig(String bridgeName, String forwardingAddress);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222172, value = "Queue {0} was busy for more than {1} milliseconds. There are possibly consumers hanging on a network operation",
format = Message.Format.MESSAGE_FORMAT)
void queueBusy(String name, long timeout);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222173, value = "Queue {0} is duplicated during reload. This queue will be renamed as {1}", format = Message.Format.MESSAGE_FORMAT)
void queueDuplicatedRenaming(String name, String newName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222174, value = "Queue {0}, on address={1}, is taking too long to flush deliveries. Watch out for frozen clients.", format = Message.Format.MESSAGE_FORMAT)
void timeoutFlushInTransit(String queueName, String addressName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222175, value = "Bridge {0} could not find configured connectors", format = Message.Format.MESSAGE_FORMAT)
void bridgeCantFindConnectors(String bridgeName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222176,
value = "A session that was already doing XA work on {0} is replacing the xid by {1} " + ". This was most likely caused from a previous communication timeout",
format = Message.Format.MESSAGE_FORMAT)
void xidReplacedOnXStart(String xidOriginalToString, String xidReplacedToString);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222177, value = "Wrong configuration for role, {0} is not a valid permission",
format = Message.Format.MESSAGE_FORMAT)
void rolePermissionConfigurationError(String permission);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222178, value = "Error during recovery of page counters",
format = Message.Format.MESSAGE_FORMAT)
void errorRecoveringPageCounter(@Cause Throwable error);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222181, value = "Unable to scaleDown messages", format = Message.Format.MESSAGE_FORMAT)
void failedToScaleDown(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222182, value = "Missing cluster-configuration for scale-down-clustername {0}", format = Message.Format.MESSAGE_FORMAT)
void missingClusterConfigForScaleDown(String scaleDownCluster);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222183, value = "Blocking message production on address ''{0}''; size is currently: {1} bytes; max-size-bytes on address: {2}, global-max-size is {3}", format = Message.Format.MESSAGE_FORMAT)
void blockingMessageProduction(SimpleString addressName, long currentSize, long maxSize, long globalMaxSize);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222184,
value = "Unable to recover group bindings in SCALE_DOWN mode, only FULL backup server can do this",
format = Message.Format.MESSAGE_FORMAT)
void groupBindingsOnRecovery();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222185,
value = "no cluster connection for specified replication cluster",
format = Message.Format.MESSAGE_FORMAT)
void noClusterConnectionForReplicationCluster();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222186,
value = "unable to authorise cluster control",
format = Message.Format.MESSAGE_FORMAT)
void clusterControlAuthfailure();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222187,
value = "Failed to activate replicated backup",
format = Message.Format.MESSAGE_FORMAT)
void activateReplicatedBackupFailed(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222188,
value = "Unable to find target queue for node {0}",
format = Message.Format.MESSAGE_FORMAT)
void unableToFindTargetQueue(String targetNodeID);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222189,
value = "Failed to activate shared store slave",
format = Message.Format.MESSAGE_FORMAT)
void activateSharedStoreSlaveFailed(@Cause Throwable e);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222191,
value = "Could not find any configured role for user {0}.",
format = Message.Format.MESSAGE_FORMAT)
void cannotFindRoleForUser(String user);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222192,
value = "Could not delete: {0}",
format = Message.Format.MESSAGE_FORMAT)
void couldNotDeleteTempFile(String tempFileName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222193,
value = "Memory Limit reached. Producer ({0}) stopped to prevent flooding {1} (blocking for {2}s). See http://activemq.apache.org/producer-flow-control.html for more info.",
format = Message.Format.MESSAGE_FORMAT)
void memoryLimitReached(String producerID, String address, long duration);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222194,
value = "PageCursorInfo == null on address {0}, pos = {1}, queue = {2}.",
format = Message.Format.MESSAGE_FORMAT)
void nullPageCursorInfo(String address, String position, long id);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222195,
value = "Large message {0} wasn''t found when dealing with add pending large message",
format = Message.Format.MESSAGE_FORMAT)
void largeMessageNotFound(long id);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222196,
value = "Could not find binding with id={0} on routeFromCluster for message={1} binding = {2}",
format = Message.Format.MESSAGE_FORMAT)
void bindingNotFound(long id, String message, String binding);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222197,
value = "Internal error! Delivery logic has identified a non delivery and still handled a consumer!",
format = Message.Format.MESSAGE_FORMAT)
void nonDeliveryHandled();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222198,
value = "Could not flush ClusterManager executor ({0}) in 10 seconds, verify your thread pool size",
format = Message.Format.MESSAGE_FORMAT)
void couldNotFlushClusterManager(String manager);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222199,
value = "Thread dump: {0}",
format = Message.Format.MESSAGE_FORMAT)
void threadDump(String manager);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222200,
value = "Could not finish executor on {0}",
format = Message.Format.MESSAGE_FORMAT)
void couldNotFinishExecutor(String clusterConnection);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222201,
value = "Timed out waiting for activation to exit",
format = Message.Format.MESSAGE_FORMAT)
void activationTimeout();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222202,
value = "{0}: <{1}> should not be set to the same value as <{2}>. " +
"If a system is under high load, or there is a minor network delay, " +
"there is a high probability of a cluster split/failure due to connection timeout.",
format = Message.Format.MESSAGE_FORMAT)
void connectionTTLEqualsCheckPeriod(String connectionName, String ttl, String checkPeriod);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222203, value = "Classpath lacks a protocol-manager for protocol {0}; protocol being ignored on acceptor {1}",
format = Message.Format.MESSAGE_FORMAT)
void noProtocolManagerFound(String protocol, String host);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222204, value = "Duplicated Acceptor {0} with parameters {1} classFactory={2} duplicated on the configuration", format = Message.Format.MESSAGE_FORMAT)
void duplicatedAcceptor(String name, String parameters, String classFactory);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222205, value = "OutOfMemoryError possible! There are currently {0} addresses with a total max-size-bytes of {1} bytes, but the maximum memory available is {2} bytes.", format = Message.Format.MESSAGE_FORMAT)
void potentialOOME(long addressCount, long totalMaxSizeBytes, long maxMemory);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222206, value = "Connection limit of {0} reached. Refusing connection from {1}.", format = Message.Format.MESSAGE_FORMAT)
void connectionLimitReached(long connectionsAllowed, String address);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222207, value = "The backup server is not responding promptly, introducing latency beyond the limit. The replication server is being disconnected now.",
format = Message.Format.MESSAGE_FORMAT)
void slowReplicationResponse();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222208, value = "SSL handshake failed for client from {0}: {1}.",
format = Message.Format.MESSAGE_FORMAT)
void sslHandshakeFailed(String clientAddress, String cause);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222209, value = "Could not contact group handler coordinator after 10 retries, message being routed without grouping information",
format = Message.Format.MESSAGE_FORMAT)
void impossibleToRouteGrouped();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222210, value = "Storage usage is beyond max-disk-usage. System will start blocking producers.",
format = Message.Format.MESSAGE_FORMAT)
void diskBeyondCapacity();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222211, value = "Storage is back to stable now, under max-disk-usage.",
format = Message.Format.MESSAGE_FORMAT)
void diskCapacityRestored();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222212, value = "Disk Full! Blocking message production on address ''{0}''. Clients will report blocked.", format = Message.Format.MESSAGE_FORMAT)
void blockingDiskFull(SimpleString addressName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222213,
value = "There was an issue on the network, server is isolated!",
format = Message.Format.MESSAGE_FORMAT)
void serverIsolatedOnNetwork();
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222214,
value = "Destination {1} has an inconsistent and negative address size={0}.",
format = Message.Format.MESSAGE_FORMAT)
void negativeAddressSize(long size, String destination);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222215,
value = "Global Address Size has negative and inconsistent value as {0}",
format = Message.Format.MESSAGE_FORMAT)
void negativeGlobalAddressSize(long size);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222216, value = "Security problem while creating session: {0}", format = Message.Format.MESSAGE_FORMAT)
void securityProblemWhileCreatingSession(String message);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222217, value = "Cannot find connector-ref {0}. The cluster-connection {1} will not be deployed.", format = Message.Format.MESSAGE_FORMAT)
void connectorRefNotFound(String connectorRef, String clusterConnection);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 222218, value = "Server disconnecting: {0}", format = Message.Format.MESSAGE_FORMAT)
void disconnectCritical(String reason, @Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224000, value = "Failure in initialisation", format = Message.Format.MESSAGE_FORMAT)
void initializationError(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224001, value = "Error deploying URI {0}", format = Message.Format.MESSAGE_FORMAT)
void errorDeployingURI(@Cause Throwable e, URI uri);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224002, value = "Error deploying URI", format = Message.Format.MESSAGE_FORMAT)
void errorDeployingURI(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224003, value = "Error undeploying URI {0}", format = Message.Format.MESSAGE_FORMAT)
void errorUnDeployingURI(@Cause Throwable e, URI a);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224005, value = "Unable to deploy node {0}", format = Message.Format.MESSAGE_FORMAT)
void unableToDeployNode(@Cause Exception e, Node node);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224006, value = "Invalid filter: {0}", format = Message.Format.MESSAGE_FORMAT)
void invalidFilter(SimpleString filter, @Cause Throwable cause);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224007, value = "page subscription = {0} error={1}", format = Message.Format.MESSAGE_FORMAT)
void pageSubscriptionError(IOCallback IOCallback, String error);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224008, value = "Failed to store id", format = Message.Format.MESSAGE_FORMAT)
void batchingIdError(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224009, value = "Cannot find message {0}", format = Message.Format.MESSAGE_FORMAT)
void cannotFindMessage(Long id);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224010, value = "Cannot find queue messages for queueID={0} on ack for messageID={1}", format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueue(Long queue, Long id);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224011, value = "Cannot find queue messages {0} for message {1} while processing scheduled messages", format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindQueueScheduled(Long queue, Long id);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224012, value = "error releasing resources", format = Message.Format.MESSAGE_FORMAT)
void largeMessageErrorReleasingResources(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224013, value = "failed to expire messages for queue", format = Message.Format.MESSAGE_FORMAT)
void errorExpiringMessages(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224014, value = "Failed to close session", format = Message.Format.MESSAGE_FORMAT)
void errorClosingSession(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224015, value = "Caught XA exception", format = Message.Format.MESSAGE_FORMAT)
void caughtXaException(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224016, value = "Caught exception", format = Message.Format.MESSAGE_FORMAT)
void caughtException(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224017, value = "Invalid packet {0}", format = Message.Format.MESSAGE_FORMAT)
void invalidPacket(Packet packet);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224018, value = "Failed to create session", format = Message.Format.MESSAGE_FORMAT)
void failedToCreateSession(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224019, value = "Failed to reattach session", format = Message.Format.MESSAGE_FORMAT)
void failedToReattachSession(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224020, value = "Failed to handle create queue", format = Message.Format.MESSAGE_FORMAT)
void failedToHandleCreateQueue(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224021, value = "Failed to decode packet", format = Message.Format.MESSAGE_FORMAT)
void errorDecodingPacket(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224022, value = "Failed to execute failure listener", format = Message.Format.MESSAGE_FORMAT)
void errorCallingFailureListener(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224024, value = "Stomp Error, tx already exists! {0}", format = Message.Format.MESSAGE_FORMAT)
void stompErrorTXExists(String txID);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224027, value = "Failed to write to handler on invm connector {0}", format = Message.Format.MESSAGE_FORMAT)
void errorWritingToInvmConnector(@Cause Exception e, Runnable runnable);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224028, value = "Failed to stop acceptor {0}", format = Message.Format.MESSAGE_FORMAT)
void errorStoppingAcceptor(String name);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224029, value = "large message sync: largeMessage instance is incompatible, ignoring data", format = Message.Format.MESSAGE_FORMAT)
void largeMessageIncompatible();
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224030, value = "Could not cancel reference {0}", format = Message.Format.MESSAGE_FORMAT)
void errorCancellingRefOnBridge(@Cause Exception e, MessageReference ref2);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224031, value = "-------------------------------Stomp begin tx: {0}", format = Message.Format.MESSAGE_FORMAT)
void stompBeginTX(String txID);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224032, value = "Failed to pause bridge", format = Message.Format.MESSAGE_FORMAT)
void errorPausingBridge(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224033, value = "Failed to broadcast connector configs", format = Message.Format.MESSAGE_FORMAT)
void errorBroadcastingConnectorConfigs(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224034, value = "Failed to close consumer", format = Message.Format.MESSAGE_FORMAT)
void errorClosingConsumer(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224035, value = "Failed to close cluster connection flow record", format = Message.Format.MESSAGE_FORMAT)
void errorClosingFlowRecord(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224036, value = "Failed to update cluster connection topology", format = Message.Format.MESSAGE_FORMAT)
void errorUpdatingTopology(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224037, value = "cluster connection Failed to handle message", format = Message.Format.MESSAGE_FORMAT)
void errorHandlingMessage(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224038, value = "Failed to ack old reference", format = Message.Format.MESSAGE_FORMAT)
void errorAckingOldReference(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224039, value = "Failed to expire message reference", format = Message.Format.MESSAGE_FORMAT)
void errorExpiringRef(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224040, value = "Failed to remove consumer", format = Message.Format.MESSAGE_FORMAT)
void errorRemovingConsumer(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224041, value = "Failed to deliver", format = Message.Format.MESSAGE_FORMAT)
void errorDelivering(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224042, value = "Error while restarting the backup server: {0}", format = Message.Format.MESSAGE_FORMAT)
void errorRestartingBackupServer(@Cause Exception e, ActiveMQServer backup);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224043, value = "Failed to send forced delivery message", format = Message.Format.MESSAGE_FORMAT)
void errorSendingForcedDelivery(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224044, value = "error acknowledging message", format = Message.Format.MESSAGE_FORMAT)
void errorAckingMessage(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224045, value = "Failed to run large message deliverer", format = Message.Format.MESSAGE_FORMAT)
void errorRunningLargeMessageDeliverer(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224046, value = "Exception while browser handled message from {0}", format = Message.Format.MESSAGE_FORMAT)
void errorBrowserHandlingMessage(@Cause Exception e, MessageReference current);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224047, value = "Failed to delete large message file", format = Message.Format.MESSAGE_FORMAT)
void errorDeletingLargeMessageFile(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224048, value = "Failed to remove temporary queue {0}", format = Message.Format.MESSAGE_FORMAT)
void errorRemovingTempQueue(@Cause Exception e, SimpleString bindingName);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224049, value = "Cannot find consumer with id {0}", format = Message.Format.MESSAGE_FORMAT)
void cannotFindConsumer(long consumerID);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224050, value = "Failed to close connection {0}", format = Message.Format.MESSAGE_FORMAT)
void errorClosingConnection(ServerSessionImpl serverSession);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224051, value = "Failed to call notification listener", format = Message.Format.MESSAGE_FORMAT)
void errorCallingNotifListener(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224052, value = "Unable to call Hierarchical Repository Change Listener", format = Message.Format.MESSAGE_FORMAT)
void errorCallingRepoListener(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224053, value = "failed to timeout transaction, xid:{0}", format = Message.Format.MESSAGE_FORMAT)
void errorTimingOutTX(@Cause Exception e, Xid xid);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224054, value = "exception while stopping the replication manager", format = Message.Format.MESSAGE_FORMAT)
void errorStoppingReplicationManager(@Cause Throwable t);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224055, value = "Bridge Failed to ack", format = Message.Format.MESSAGE_FORMAT)
void bridgeFailedToAck(@Cause Throwable t);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224056, value = "Live server will not fail-back automatically", format = Message.Format.MESSAGE_FORMAT)
void autoFailBackDenied();
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224057, value = "Backup server that requested fail-back was not announced. Server will not stop for fail-back.",
format = Message.Format.MESSAGE_FORMAT)
void failbackMissedBackupAnnouncement();
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224058, value = "Stopping ClusterManager, as it failed to authenticate with the cluster: {0}",
format = Message.Format.MESSAGE_FORMAT)
void clusterManagerAuthenticationError(String msg);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224059, value = "Invalid cipher suite specified. Supported cipher suites are: {0}", format = Message.Format.MESSAGE_FORMAT)
void invalidCipherSuite(String validSuites);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224060, value = "Invalid protocol specified. Supported protocols are: {0}", format = Message.Format.MESSAGE_FORMAT)
void invalidProtocol(String validProtocols);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224061, value = "Setting both <{0}> and <ha-policy> is invalid. Please use <ha-policy> exclusively as <{0}> is deprecated. Ignoring <{0}> value.", format = Message.Format.MESSAGE_FORMAT)
void incompatibleWithHAPolicy(String parameter);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224062, value = "Failed to send SLOW_CONSUMER notification: {0}", format = Message.Format.MESSAGE_FORMAT)
void failedToSendSlowConsumerNotification(Notification notification, @Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224063, value = "Failed to close consumer connections for address {0}", format = Message.Format.MESSAGE_FORMAT)
void failedToCloseConsumerConnectionsForAddress(String address, @Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224064, value = "Setting <{0}> is invalid with this HA Policy Configuration. Please use <ha-policy> exclusively or remove. Ignoring <{0}> value.", format = Message.Format.MESSAGE_FORMAT)
void incompatibleWithHAPolicyChosen(String parameter);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224065, value = "Failed to remove auto-created queue {0}", format = Message.Format.MESSAGE_FORMAT)
void errorRemovingAutoCreatedQueue(@Cause Exception e, SimpleString bindingName);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224066, value = "Error opening context for LDAP", format = Message.Format.MESSAGE_FORMAT)
void errorOpeningContextForLDAP(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224067, value = "Error populating security roles from LDAP", format = Message.Format.MESSAGE_FORMAT)
void errorPopulatingSecurityRolesFromLDAP(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224068, value = "Unable to stop component: {0}", format = Message.Format.MESSAGE_FORMAT)
void errorStoppingComponent(@Cause Throwable t, String componentClassName);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224069, value = "Change detected in broker configuration file, but reload failed", format = Message.Format.MESSAGE_FORMAT)
void configurationReloadFailed(@Cause Throwable t);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 224072, value = "Message Counter Sample Period too short: {0}", format = Message.Format.MESSAGE_FORMAT)
void invalidMessageCounterPeriod(long value);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 224073, value = "Using MAPPED Journal", format = Message.Format.MESSAGE_FORMAT)
void journalUseMAPPED();
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224074, value = "Failed to purge queue {0} on no consumers", format = Message.Format.MESSAGE_FORMAT)
void failedToPurgeQueue(@Cause Exception e, SimpleString bindingName);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224075, value = "Cannot find pageTX id = {0}", format = Message.Format.MESSAGE_FORMAT)
void journalCannotFindPageTX(Long id);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224079, value = "The process for the virtual machine will be killed, as component {0} is not responsive", format = Message.Format.MESSAGE_FORMAT)
void criticalSystemHalt(Object component);
@LogMessage(level = Logger.Level.ERROR)
@Message(id = 224080, value = "The server process will now be stopped, as component {0} is not responsive", format = Message.Format.MESSAGE_FORMAT)
void criticalSystemShutdown(Object component);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 224081, value = "The component {0} is not responsive", format = Message.Format.MESSAGE_FORMAT)
void criticalSystemLog(Object component);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 224076, value = "UnDeploying address {0}", format = Message.Format.MESSAGE_FORMAT)
void undeployAddress(SimpleString addressName);
@LogMessage(level = Logger.Level.INFO)
@Message(id = 224077, value = "UnDeploying queue {0}", format = Message.Format.MESSAGE_FORMAT)
void undeployQueue(SimpleString queueName);
@LogMessage(level = Logger.Level.WARN)
@Message(id = 224078, value = "The size of duplicate cache detection (<id_cache-size/>) appears to be too large {0}. It should be no greater than the number of messages that can be squeezed into the confirmation buffer (<confirmation-window-size/>) {1}.", format = Message.Format.MESSAGE_FORMAT)
void duplicateCacheSizeWarning(int idCacheSize, int confirmationWindowSize);
}<|fim▁end|>
|
@LogMessage(level = Logger.Level.WARN)
|
<|file_name|>simple_modis_algorithms.py<|end_file_name|><|fim▁begin|># -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.<|fim▁hole|># License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import ee
import math
from cmt.mapclient_qt import addToMap
from cmt.util.miscUtilities import safe_get_info
import modis_utilities
'''
Contains implementations of several simple MODIS-based flood detection algorithms.
'''
#==============================================================
def dem_threshold(domain, b):
'''Just use a height threshold on the DEM!'''
heightLevel = float(domain.algorithm_params['dem_threshold'])
dem = domain.get_dem().image
return dem.lt(heightLevel).select(['elevation'], ['b1'])
#==============================================================
def evi(domain, b):
'''Simple EVI based classifier'''
#no_clouds = b['b3'].lte(2100).select(['sur_refl_b03'], ['b1'])
criteria1 = b['EVI'].lte(0.3).And(b['LSWI'].subtract(b['EVI']).gte(0.05)).select(['sur_refl_b02'], ['b1'])
criteria2 = b['EVI'].lte(0.05).And(b['LSWI'].lte(0.0)).select(['sur_refl_b02'], ['b1'])
#return no_clouds.And(criteria1.Or(criteria2))
return criteria1.Or(criteria2)
def xiao(domain, b):
'''Method from paper: Xiao, Boles, Frolking, et al. Mapping paddy rice agriculture in South and Southeast Asia using
multi-temporal MODIS images, Remote Sensing of Environment, 2006.
This method implements a very simple decision tree from several standard MODIS data products.
The default constants were tuned for (wet) rice paddy detection.
'''
return b['LSWI'].subtract(b['NDVI']).gte(0.05).Or(b['LSWI'].subtract(b['EVI']).gte(0.05)).select(['sur_refl_b02'], ['b1']);
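# The NDVI/EVI/LSWI bands above are assumed to come from
# modis_utilities.compute_modis_indices(); a pixel passes when either
# LSWI - NDVI >= 0.05 or LSWI - EVI >= 0.05, i.e. the water-sensitive
# reflectance dominates the vegetation signal.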
#==============================================================
def get_diff(b):
'''Just the internals of the difference method'''
return b['b2'].subtract(b['b1']).select(['sur_refl_b02'], ['b1'])
def diff_learned(domain, b):
'''modis_diff but with the threshold calculation included (training image required)'''
if domain.unflooded_domain == None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_diff(unflooded_b), water_mask, domain.bounds)
return modis_diff(domain, b, threshold)
def modis_diff(domain, b, threshold=None):
'''Compute (b2-b1) < threshold, a simple water detection index.
This method may be all that is needed in cases where the threshold can be hand tuned.
'''
if threshold == None: # If no threshold value passed in, load it based on the data set.
threshold = float(domain.algorithm_params['modis_diff_threshold'])
return get_diff(b).lte(threshold)
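# A minimal usage sketch (hypothetical domain object and hand-tuned threshold;
# the band dict b is assumed to come from compute_modis_indices):
#
#   b = modis_utilities.compute_modis_indices(domain)
#   water = modis_diff(domain, b, threshold=-300)  # ee.Image with band 'b1'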
#==============================================================
def get_dartmouth(b):
A = 500
B = 2500
return b['b2'].add(A).divide(b['b1'].add(B)).select(['sur_refl_b02'], ['b1'])
def dart_learned(domain, b):
'''The dartmouth method but with threshold calculation included (training image required)'''
if domain.unflooded_domain == None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_dartmouth(unflooded_b), water_mask, domain.bounds)
return dartmouth(domain, b, threshold)
def dartmouth(domain, b, threshold=None):
'''A flood detection method from the Dartmouth Flood Observatory.
This method is a refinement of the simple b2-b1 detection method.
'''
if threshold == None:
threshold = float(domain.algorithm_params['dartmouth_threshold'])
return get_dartmouth(b).lte(threshold)
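# With the constants above the index is (b2 + 500) / (b1 + 2500), a damped band
# ratio; water suppresses b2 (near-infrared), so pixels at or below the threshold
# are classified as water, and lowering the threshold makes detection more
# conservative.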
#==============================================================
def get_mod_ndwi(b):
return b['b6'].subtract(b['b4']).divide(b['b4'].add(b['b6'])).select(['sur_refl_b06'], ['b1'])
def mod_ndwi_learned(domain, b):
if domain.unflooded_domain == None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_mod_ndwi(unflooded_b), water_mask, domain.bounds)
return mod_ndwi(domain, b, threshold)
def mod_ndwi(domain, b, threshold=None):
if threshold == None:
threshold = float(domain.algorithm_params['mod_ndwi_threshold'])
return get_mod_ndwi(b).lte(threshold)
#==============================================================
def get_fai(b):
'''Just the internals of the FAI method'''
return b['b2'].subtract(b['b1'].add(b['b5'].subtract(b['b1']).multiply((859.0 - 645) / (1240 - 645)))).select(['sur_refl_b02'], ['b1'])
def fai_learned(domain, b):
if domain.unflooded_domain == None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_fai(unflooded_b), water_mask, domain.bounds)
return fai(domain, b, threshold)
def fai(domain, b, threshold=None):
''' Floating Algae Index. Method from paper: Feng, Hu, Chen, Cai, Tian, Gan,
Assessment of inundation changes of Poyang Lake using MODIS observations
between 2000 and 2010. Remote Sensing of Environment, 2012.
'''
if threshold == None:
threshold = float(domain.algorithm_params['fai_threshold'])
return get_fai(b).lte(threshold)<|fim▁end|>
|
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
<|file_name|>managers.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import (
Permission,
User
)
class GymManager(models.Manager):
'''
Custom query manager for Gyms
'''
def get_members(self, gym_pk):
'''
Returns all members for this gym (i.e. non-admin ones)
'''
perm_gym = Permission.objects.get(codename='manage_gym')
perm_gyms = Permission.objects.get(codename='manage_gyms')
perm_trainer = Permission.objects.get(codename='gym_trainer')
users = User.objects.filter(userprofile__gym_id=gym_pk)
return users.exclude(Q(groups__permissions=perm_gym) |
Q(groups__permissions=perm_gyms) |
Q(groups__permissions=perm_trainer)).distinct()
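# A usage sketch (hypothetical gym instance; assumes this manager is installed
# as Gym.objects): Gym.objects.get_members(gym.pk) yields the users at the gym
# holding none of the three management permissions.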
def get_admins(self, gym_pk):
'''
Returns all admins for this gym (i.e trainers, managers, etc.)
'''
perm_gym = Permission.objects.get(codename='manage_gym')
perm_gyms = Permission.objects.get(codename='manage_gyms')
perm_trainer = Permission.objects.get(codename='gym_trainer')
users = User.objects.filter(userprofile__gym_id=gym_pk)<|fim▁hole|> Q(groups__permissions=perm_trainer)).distinct()<|fim▁end|>
|
return users.filter(Q(groups__permissions=perm_gym) |
Q(groups__permissions=perm_gyms) |
|
<|file_name|>convert.py<|end_file_name|><|fim▁begin|># encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import absolute_import
import HTMLParser
import StringIO
import ast
import base64
import cgi
from collections import Mapping
import datetime
from decimal import Decimal
import gzip
import hashlib
from io import BytesIO
import json
import re
from tempfile import TemporaryFile
from pyLibrary import strings
from pyLibrary.dot import wrap, wrap_leaves, unwrap, unwraplist, split_field, join_field, coalesce
from pyLibrary.collections.multiset import Multiset
from pyLibrary.debugs.logs import Log, Except
from pyLibrary.env.big_data import FileString, safe_size
from pyLibrary.jsons import quote
from pyLibrary.jsons.encoder import json_encoder, pypy_json_encode
from pyLibrary.strings import expand_template
from pyLibrary.times.dates import Date
"""
DUE TO MY POOR MEMORY, THIS IS A LIST OF ALL CONVERSION ROUTINES
IN <from_type> "2" <to_type> FORMAT
"""
def value2json(obj, pretty=False):
try:
json = json_encoder(obj, pretty=pretty)
if json == None:
Log.note(str(type(obj)) + " is not valid{{type}}JSON", type= " (pretty) " if pretty else " ")
Log.error("Not valid JSON: " + str(obj) + " of type " + str(type(obj)))
return json
except Exception, e:
e = Except.wrap(e)
try:
json = pypy_json_encode(obj)
return json
except Exception:
pass
Log.error("Can not encode into JSON: {{value}}", value=repr(obj), cause=e)
def remove_line_comment(line):
mode = 0 # 0=code, 1=inside_string, 2=escaping
for i, c in enumerate(line):
if c == '"':
if mode == 0:
mode = 1
elif mode == 1:
mode = 0
else:
mode = 1
elif c == '\\':
if mode == 0:
mode = 0
elif mode == 1:
mode = 2
else:
mode = 1
elif mode == 2:
mode = 1
elif c == "#" and mode == 0:
return line[0:i]
elif c == "/" and mode == 0 and line[i + 1] == "/":
return line[0:i]
return line
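# A couple of hedged examples of the state machine above:
#   remove_line_comment('x = 1  # note')  -> 'x = 1  '
#   remove_line_comment('s = "a#b" // t') -> 's = "a#b" '   (a "#" inside a string survives)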
def json2value(json_string, params={}, flexible=False, leaves=False):
"""
:param json_string: THE JSON
:param params: STANDARD JSON PARAMS
:param flexible: REMOVE COMMENTS
:param leaves: ASSUME JSON KEYS ARE DOT-DELIMITED
:return: Python value
"""
if isinstance(json_string, str):
Log.error("only unicode json accepted")
try:
if flexible:
# REMOVE """COMMENTS""", # COMMENTS, //COMMENTS, AND \n \r
# DERIVED FROM https://github.com/jeads/datasource/blob/master/datasource/bases/BaseHub.py# L58
json_string = re.sub(r"\"\"\".*?\"\"\"", r"\n", json_string, flags=re.MULTILINE)
json_string = "\n".join(remove_line_comment(l) for l in json_string.split("\n"))
# ALLOW DICTIONARY'S NAME:VALUE LIST TO END WITH COMMA
json_string = re.sub(r",\s*\}", r"}", json_string)
# ALLOW LISTS TO END WITH COMMA
json_string = re.sub(r",\s*\]", r"]", json_string)
if params:
json_string = expand_template(json_string, params)
# LOOKUP REFERENCES
value = wrap(json_decoder(json_string))
if leaves:
value = wrap_leaves(value)
return value
except Exception, e:
e = Except.wrap(e)
if "Expecting '" in e and "' delimiter: line" in e:
line_index = int(strings.between(e.message, " line ", " column ")) - 1
column = int(strings.between(e.message, " column ", " ")) - 1
line = json_string.split("\n")[line_index].replace("\t", " ")
if column > 20:
sample = "..." + line[column - 20:]
pointer = " " + (" " * 20) + "^"
else:
sample = line
pointer = (" " * column) + "^"
if len(sample) > 43:
sample = sample[:43] + "..."
Log.error("Can not decode JSON at:\n\t" + sample + "\n\t" + pointer + "\n")
base_str = unicode2utf8(strings.limit(json_string, 1000))
hexx_str = bytes2hex(base_str, " ")
try:
char_str = " " + (" ".join(c.decode("latin1") if ord(c) >= 32 else ".") for c in base_str)
except Exception:
char_str = " "
Log.error("Can not decode JSON:\n" + char_str + "\n" + hexx_str + "\n", e)
def string2datetime(value, format=None):
return Date(value, format).value
def str2datetime(value, format=None):
return string2datetime(value, format)
def datetime2string(value, format="%Y-%m-%d %H:%M:%S"):
return Date(value).format(format=format)
def datetime2str(value, format="%Y-%m-%d %H:%M:%S"):
return Date(value).format(format=format)
def datetime2unix(d):
try:
if d == None:
return None
elif isinstance(d, datetime.datetime):
epoch = datetime.datetime(1970, 1, 1)
elif isinstance(d, datetime.date):
epoch = datetime.date(1970, 1, 1)
else:
Log.error("Can not convert {{value}} of type {{type}}", value= d, type= d.__class__)
diff = d - epoch
return Decimal(long(diff.total_seconds() * 1000000)) / 1000000
except Exception, e:
Log.error("Can not convert {{value}}", value= d, cause=e)
def datetime2milli(d):
return datetime2unix(d) * 1000
def timedelta2milli(v):
return v.total_seconds()
def unix2datetime(u):
try:
if u == None:
return None
if u == 9999999999: # PYPY BUG https://bugs.pypy.org/issue1697
return datetime.datetime(2286, 11, 20, 17, 46, 39)
return datetime.datetime.utcfromtimestamp(u)
except Exception, e:
Log.error("Can not convert {{value}} to datetime", value= u, cause=e)
def milli2datetime(u):
if u == None:
return None
return unix2datetime(u / 1000.0)
def dict2Multiset(dic):
if dic == None:
return None
output = Multiset()
output.dic = unwrap(dic).copy()
return output
def multiset2dict(value):
"""
CONVERT MULTISET TO dict THAT MAPS KEYS TO MAPS KEYS TO KEY-COUNT
"""
if value == None:
return None
return dict(value.dic)
def table2list(
column_names, # tuple of columns names
rows # list of tuples
):
return wrap([dict(zip(column_names, r)) for r in rows])
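# e.g. table2list(("a", "b"), [(1, 2), (3, 4)]) -> [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
# (wrapped, so attribute-style access works on the rows).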
def table2tab(
column_names, # tuple of columns names
rows # list of tuples
):
def row(r):
return "\t".join(map(value2json, r))
return row(column_names)+"\n"+("\n".join(row(r) for r in rows))
def list2tab(rows):
columns = set()
for r in wrap(rows):
columns |= set(k for k, v in r.leaves())
keys = list(columns)
output = []
for r in wrap(rows):
output.append("\t".join(value2json(r[k]) for k in keys))
return "\t".join(keys) + "\n" + "\n".join(output)
def list2table(rows, column_names=None):
if column_names:
keys = list(set(column_names))
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
output = [[unwraplist(r[k]) for k in keys] for r in rows]
return wrap({
"meta": {"format": "table"},
"header": keys,
"data": output
})
def list2cube(rows, column_names=None):
if column_names:
keys = column_names
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
data = {k: [] for k in keys}
output = wrap({
"meta": {"format": "cube"},
"edges": [
{
"name": "rownum",
"domain": {"type": "rownum", "min": 0, "max": len(rows), "interval": 1}
}
],
"data": data
})
for r in rows:
for k in keys:
data[k].append(r[k])
return output
def value2string(value):
# PROPER NULL HANDLING
if value == None:<|fim▁hole|>
def value2quote(value):
# RETURN PRETTY PYTHON CODE FOR THE SAME
if isinstance(value, basestring):
return string2quote(value)
else:
return repr(value)
def string2quote(value):
if value == None:
return "None"
return quote(value)
string2regexp = re.escape
def string2url(value):
if isinstance(value, unicode):
return "".join([_map2url[c] for c in unicode2latin1(value)])
elif isinstance(value, str):
return "".join([_map2url[c] for c in value])
else:
Log.error("Expecting a string")
def value2url(value):
if value == None:
Log.error("")
if isinstance(value, Mapping):
output = "&".join([value2url(k) + "=" + (value2url(v) if isinstance(v, basestring) else value2url(value2json(v))) for k,v in value.items()])
elif isinstance(value, unicode):
output = "".join([_map2url[c] for c in unicode2latin1(value)])
elif isinstance(value, str):
output = "".join([_map2url[c] for c in value])
elif hasattr(value, "__iter__"):
output = ",".join(value2url(v) for v in value)
else:
output = unicode(value)
return output
def url_param2value(param):
"""
CONVERT URL QUERY PARAMETERS INTO DICT
"""
if isinstance(param, unicode):
param = param.encode("ascii")
def _decode(v):
output = []
i = 0
while i < len(v):
c = v[i]
if c == "%":
d = hex2bytes(v[i + 1:i + 3])
output.append(d)
i += 3
else:
output.append(c)
i += 1
output = (b"".join(output)).decode("latin1")
try:
return json2value(output)
except Exception:
pass
return output
query = {}
for p in param.split(b'&'):
if not p:
continue
if p.find(b"=") == -1:
k = p
v = True
else:
k, v = p.split(b"=")
v = _decode(v)
u = query.get(k)
if u is None:
query[k] = v
elif isinstance(u, list):
u += [v]
else:
query[k] = [u, v]
return query
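# A hedged sketch: url_param2value(b"a=1&a=2&flag") is expected to yield
# {a: [1, 2], flag: True} -- repeated keys collect into a list, bare keys map
# to True, and values are JSON-decoded when possible.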
def html2unicode(value):
# http://stackoverflow.com/questions/57708/convert-xml-html-entities-into-unicode-string-in-python
return HTMLParser.HTMLParser().unescape(value)
def unicode2html(value):
return cgi.escape(value)
def unicode2latin1(value):
output = value.encode("latin1")
return output
def quote2string(value):
try:
return ast.literal_eval(value)
except Exception:
pass
# RETURN PYTHON CODE FOR THE SAME
def value2code(value):
return repr(value)
def DataFrame2string(df, columns=None):
output = StringIO.StringIO()
try:
df.to_csv(output, sep="\t", header=True, cols=columns, engine='python')
return output.getvalue()
finally:
output.close()
def ascii2char(ascii):
return chr(ascii)
def char2ascii(char):
return ord(char)
def ascii2unicode(value):
return value.decode("latin1")
def latin12hex(value):
return value.encode("hex")
def int2hex(value, size):
return (("0" * size) + hex(value)[2:])[-size:]
def hex2bytes(value):
return value.decode("hex")
def bytes2hex(value, separator=" "):
return separator.join("%02X" % ord(x) for x in value)
def base642bytearray(value):
return bytearray(base64.b64decode(value))
def base642bytes(value):
return base64.b64decode(value)
def bytes2base64(value):
return base64.b64encode(value).decode("utf8")
def bytes2sha1(value):
if isinstance(value, unicode):
Log.error("can not convert unicode to sha1")
sha = hashlib.sha1(value)
return sha.hexdigest()
def value2intlist(value):
if value == None:
return None
elif hasattr(value, '__iter__'):
output = [int(d) for d in value if d != "" and d != None]
return output
elif value.strip() == "":
return None
else:
return [int(value)]
def value2int(value):
if value == None:
return None
else:
return int(value)
def value2number(v):
try:
if isinstance(v, float) and round(v, 0) != v:
return v
# IF LOOKS LIKE AN INT, RETURN AN INT
return int(v)
except Exception:
try:
return float(v)
except Exception, e:
Log.error("Not a number ({{value}})", value= v, cause=e)
def utf82unicode(value):
return value.decode('utf8')
def unicode2utf8(value):
return value.encode('utf8')
def latin12unicode(value):
if isinstance(value, unicode):
Log.error("can not convert unicode from latin1")
try:
return unicode(value.decode('iso-8859-1'))
except Exception, e:
Log.error("Can not convert {{value|quote}} to unicode", value=value)
def pipe2value(value):
type = value[0]
if type == '0':
return None
if type == 'n':
return value2number(value[1::])
if type != 's' and type != 'a':
Log.error("unknown pipe type ({{type}}) in {{value}}", type= type, value= value)
# EXPECTING MOST STRINGS TO NOT HAVE ESCAPED CHARS
output = _unPipe(value)
if type == 's':
return output
return [pipe2value(v) for v in output.split("|")]
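# A sketch of the pipe encoding, inferred from the parser above; each value
# carries a one-character type tag:
#   pipe2value("0")      -> None
#   pipe2value("n42")    -> 42
#   pipe2value("shello") -> "hello"   ("\p" escapes "|", "\\" escapes "\")
#   pipe2value("an1|n2") -> [1, 2]    (arrays hold "|"-separated encoded values)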
def zip2bytes(compressed):
"""
UNZIP DATA
"""
if hasattr(compressed, "read"):
return gzip.GzipFile(fileobj=compressed, mode='r')
buff = BytesIO(compressed)
archive = gzip.GzipFile(fileobj=buff, mode='r')
return safe_size(archive)
def bytes2zip(bytes):
"""
RETURN COMPRESSED BYTES
"""
if hasattr(bytes, "read"):
buff = TemporaryFile()
archive = gzip.GzipFile(fileobj=buff, mode='w')
for b in bytes:
archive.write(b)
archive.close()
buff.seek(0)
return FileString(buff)
buff = BytesIO()
archive = gzip.GzipFile(fileobj=buff, mode='w')
archive.write(bytes)
archive.close()
return buff.getvalue()
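# Round-trip sketch (assuming safe_size and FileString preserve the
# file-like read interface used here):
#   packed = bytes2zip("hello world")
#   assert zip2bytes(packed).read() == "hello world"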
def ini2value(ini_content):
"""
INI FILE CONTENT TO Dict
"""
from ConfigParser import ConfigParser
buff = StringIO.StringIO(ini_content)
config = ConfigParser()
config._read(buff, "dummy")
output = {}
for section in config.sections():
output[section] = s = {}
for k, v in config.items(section):
s[k] = v
return wrap(output)
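# Example (a sketch; ConfigParser yields all values as strings, and wrap()
# makes the nested dicts attribute-accessible):
#   ini2value("[db]\nhost = localhost\nport = 5432\n")
#   -> {"db": {"host": "localhost", "port": "5432"}}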
_map2url = {chr(i): latin12unicode(chr(i)) for i in range(32, 256)}
for c in " {}<>;/?:@&=+$,":
_map2url[c] = "%" + int2hex(ord(c), 2)
def _unPipe(value):
s = value.find("\\", 1)
if s < 0:
return value[1::]
result = ""
e = 1
while True:
c = value[s + 1]
if c == 'p':
result = result + value[e:s] + '|'
s += 2
e = s
elif c == '\\':
result = result + value[e:s] + '\\'
s += 2
e = s
else:
s += 1
s = value.find("\\", s)
if s < 0:
break
return result + value[e::]
json_decoder = json.JSONDecoder().decode
def json_schema_to_markdown(schema):
from pyLibrary.queries import qb
def _md_code(code):
return "`"+code+"`"
def _md_italic(value):
return "*"+value+"*"
def _inner(schema, parent_name, indent):
more_lines = []
for k,v in schema.items():
full_name = join_field(split_field(parent_name)+[k])
details = indent+"* "+_md_code(full_name)
if v.type:
details += " - "+_md_italic(v.type)
else:
Log.error("{{full_name}} is missing type", full_name=full_name)
if v.description:
details += " " + v.description
more_lines.append(details)
if v.type in ["object", "array", "nested"]:
more_lines.extend(_inner(v.properties, full_name, indent+" "))
return more_lines
lines = []
if schema.title:
lines.append("#"+schema.title)
lines.append(schema.description)
lines.append("")
for k, v in qb.sort(schema.properties.items(), 0):
full_name = k
if v.type in ["object", "array", "nested"]:
lines.append("##"+_md_code(full_name)+" Property")
if v.description:
lines.append(v.description)
lines.append("")
if v.type in ["object", "array", "nested"]:
lines.extend(_inner(v.properties, full_name, " "))
else:
lines.append("##"+_md_code(full_name)+" ("+v.type+")")
if v.description:
lines.append(v.description)
return "\n".join(lines)<|fim▁end|>
|
return None
return unicode(value)
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
__author__ = 'zhonghong'
|
<|file_name|>table.SQL.py<|end_file_name|><|fim▁begin|><|fim▁hole|># CREATE TABLE and INSERT (possible).
###
import os, sys
try:
from MonetDBtesting import process
except ImportError:
import process
clt = process.client('sql', user = 'my_user', passwd = 'p1',
stdin = open(os.path.join(os.getenv('RELSRCDIR'), os.pardir, 'table.sql')),
stdout = process.PIPE, stderr = process.PIPE)
out, err = clt.communicate()
sys.stdout.write(out)
sys.stderr.write(err)<|fim▁end|>
|
###
# SET a GRANTed ROLE for a USER (possible).
|
<|file_name|>exports.js<|end_file_name|><|fim▁begin|>define('exports@*', [], function(require, exports, module){
<|fim▁hole|><|fim▁end|>
|
exports.a = 1;
});
|
<|file_name|>store_others.go<|end_file_name|><|fim▁begin|>//go:build !windows
// +build !windows

package fs
import (
"os"<|fim▁hole|>
func notEmptyErr(err error) bool {
return err.(*os.PathError).Err == syscall.ENOTEMPTY
}<|fim▁end|>
|
"syscall"
)
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from __future__ import unicode_literals, print_function, division<|fim▁end|>
|
# -*- coding: UTF-8 -*-
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|>import styles from './index.css'
import React from 'react'
export default React.createClass({
propTypes: {
isLoading: React.PropTypes.bool.isRequired
},
render() {
return (
<div className={
this.props.isLoading === true ? styles.show : styles.hide
}></div>
)
}
})<|fim▁end|>
| |
<|file_name|>getMethod.js<|end_file_name|><|fim▁begin|><|fim▁hole|>
var fs = require('fs');
var path = require('path');
var getRoutes = {};
getRoutes['/'] = require('./get/index.js').getPage;
getRoutes['/level'] = require('./get/level.js').getPage;
getRoutes['/play'] = require('./get/play.js').getPage;
getRoutes['Error 404'] = (req, res) => { // used for URLs with no matching route
console.log(' - ERROR 404 : ' + req.url + ' not found!!');
var data = '<h1>Error 404 : ' + req.url + ' not found!!</h1>';
res.writeHead(404, {
'Content-Type': 'text/html',
'Content-Length': data.length
});
res.end(data);
};
// Helper that reads a file and sends the data to the browser in the response
var readAndWrite = function(res, file, type) {
var data = fs.readFileSync(file);
res.writeHead(200, {
'Content-Type': type,
'Content-Length': data.length
});
res.end(data);
};
// Add routes for all CSS files
var files = fs.readdirSync('./view/css'); // list every CSS file
files.forEach(file => {
getRoutes['/css/' + file] = (req, res) => { // add a route for this CSS file
readAndWrite(res, './view/css/' + file, 'text/css') // read and serve the CSS file
};
});
// Add routes for all JS files
files = fs.readdirSync('./view/js'); // list every JS file
files.forEach(file => {
getRoutes['/js/' + file] = (req, res) => { // add a route for this JS file
readAndWrite(res, './view/js/' + file, 'application/javascript') // read and serve the JS file
};
});
// Add routes for all image files
files = fs.readdirSync('./view/img'); // list every image file
files.forEach(file => {
getRoutes['/img/' + file] = (req, res) => { // add a route for this image file
var ext = path.extname(file).toLowerCase(); // get the image's file extension
ext = ext.substr(1, ext.length - 1); // strip the leading "." (dot)
readAndWrite(res, './view/img/' + file, 'image/' + ext); // read and serve the image file
};
});
// Add routes for all font files
files = fs.readdirSync('./view/font'); // list every font file
files.forEach(file => {
getRoutes['/font/' + file] = (req, res) => { // add a route for this font file
readAndWrite(res, './view/font/' + file, 'font/opentype'); // read and serve the font file
};
});
module.exports = {
routes: getRoutes
};<|fim▁end|>
|
/*
Module that registers the routes for the GET method
*/
|
<|file_name|>test_expressions.py<|end_file_name|><|fim▁begin|>import vtrace.tests as vt_tests<|fim▁hole|>breakpoints = {
'windows': 'ntdll.NtTerminateProcess',
'linux': 'libc.exit',
'freebsd': 'libc.exit',
}
class VtraceExpressionTest(vt_tests.VtraceProcessTest):
def test_vtrace_sym(self):
plat = self.trace.getMeta('Platform')
symname = breakpoints.get(plat)
entry = self.trace.parseExpression(symname)
addEntry = self.trace.parseExpression(symname + " + 5")
self.assertTrue(entry + 5 == addEntry)
def test_baselib(self):
plat = self.trace.getMeta('Platform')
libname = breakpoints.get(plat).split('.')[0]
entry = self.trace.parseExpression(libname)
addEntry = self.trace.parseExpression(libname + " + 5")
# grab a symbol in the library and compare offsets against that?
self.assertTrue(entry + 5 == addEntry)<|fim▁end|>
| |
<|file_name|>demo_multiple_scenes.py<|end_file_name|><|fim▁begin|>#
# cocos2d
# http://python.cocos2d.org
#
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.sprite import Sprite
class TestLayer(cocos.layer.Layer):
def __init__(self):
super(TestLayer, self).__init__()
x, y = director.get_window_size()
sprite1 = Sprite('grossini.png')
sprite2 = Sprite('grossinis_sister1.png')
sprite3 = Sprite('grossinis_sister2.png')
sprite1.position = (x // 2, y // 2)
sprite2.position = (x // 4, y // 2)
sprite3.position = (3 * x / 4.0, y // 2)
self.add(sprite2)
self.add(sprite1)
self.add(sprite3)
sprite1.do(RotateBy(360, 1) * 16)
sprite2.do(RotateBy(-360, 1) * 16)
sprite3.do(RotateBy(-360, 1) * 16)
if __name__ == "__main__":
director.init(resizable=True)
main_scene = cocos.scene.Scene()
main_scene.transform_anchor = (320, 240)
child1_scene = cocos.scene.Scene()
child2_scene = cocos.scene.Scene()
child3_scene = cocos.scene.Scene()
child4_scene = cocos.scene.Scene()
sprites = TestLayer()
sprites.transform_anchor = 320, 240
child1_scene.add(ColorLayer(0, 0, 255, 255))
child1_scene.add(sprites)
child1_scene.scale = 1.5
child1_scene.position = (-160, -120)
child1_scene.transform_anchor = (320, 240)
child2_scene.add(ColorLayer(0, 255, 0, 255))
child2_scene.add(sprites)
child2_scene.scale = 1.5
child2_scene.position = (160, 120)
child2_scene.transform_anchor = (320, 240)
child3_scene.add(ColorLayer(255, 0, 0, 255))
child3_scene.add(sprites)
child3_scene.scale = 1.5
child3_scene.position = (-160, 120)
child3_scene.transform_anchor = (320, 240)
child4_scene.add(ColorLayer(255, 255, 255, 255))
child4_scene.add(sprites)
child4_scene.scale = 1.5
child4_scene.position = (160, -120)
child4_scene.transform_anchor = (320, 240)
main_scene.add(child1_scene)
main_scene.add(child2_scene)
main_scene.add(child3_scene)
main_scene.add(child4_scene)
rot = RotateBy(-360, 2)
rot2 = RotateBy(360, 4)
sleep = Delay(2)
sleep2 = Delay(2)
sc1 = ScaleTo(0.5, 0.5) + Delay(1.5)
sc2 = Delay(0.5) + ScaleTo(0.5, 0.5) + Delay(1.0)
sc3 = Delay(1.0) + ScaleTo(0.5, 0.5) + Delay(0.5)
sc4 = Delay(1.5) + ScaleTo(0.5, 0.5)
child1_scene.do(sc4 + sleep + rot + sleep + rot + rot)
child2_scene.do(sc3 + sleep + rot + sleep + rot + Reverse(rot))
child3_scene.do(sc2 + sleep + rot + sleep + rot + Reverse(rot))
child4_scene.do(sc1 + sleep + rot + sleep + rot + rot)
main_scene.do(sleep + Reverse(rot) * 2 + rot * 2 + sleep)
sprites.do(Delay(4) + rot2 * 3)<|fim▁hole|><|fim▁end|>
|
director.run(main_scene)
|
<|file_name|>bitcoin_fr.ts<|end_file_name|><|fim▁begin|><TS language="fr" version="2.1">
<context>
<name>AddressBookPage</name>
<message>
<source>Right-click to edit address or label</source>
<translation>Cliquer à droite pour modifier l'adresse ou l'étiquette</translation>
</message>
<message>
<source>Create a new address</source>
<translation>Créer une nouvelle adresse</translation>
</message>
<message>
<source>&New</source>
<translation>&Nouveau</translation>
</message>
<message>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copier l'adresse sélectionnée actuellement dans le presse-papiers</translation>
</message>
<message>
<source>&Copy</source>
<translation>&Copier</translation>
</message>
<message>
<source>C&lose</source>
<translation>&Fermer</translation>
</message>
<message>
<source>Delete the currently selected address from the list</source>
<translation>Supprimer de la liste l'adresse sélectionnée actuellement</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>Exporter les données de l'onglet actuel vers un fichier</translation>
</message>
<message>
<source>&Export</source>
<translation>&Exporter</translation>
</message>
<message>
<source>&Delete</source>
<translation>&Supprimer</translation>
</message>
<message>
<source>Choose the address to send coins to</source>
<translation>Choisir l'adresse à laquelle envoyer des pièces</translation>
</message>
<message>
<source>Choose the address to receive coins with</source>
<translation>Choisir l'adresse avec laquelle recevoir des pièces</translation>
</message>
<message>
<source>C&hoose</source>
<translation>C&hoisir</translation>
</message>
<message>
<source>Sending addresses</source>
<translation>Adresses d'envoi</translation>
</message>
<message>
<source>Receiving addresses</source>
<translation>Adresses de réception</translation>
</message>
<message>
<source>These are your BitcoinPlus addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>Voici vos adresses BitcoinPlus pour envoyer des paiements. Vérifiez toujours le montant et l'adresse du destinataire avant d'envoyer des pièces.</translation>
</message>
<message>
<source>These are your BitcoinPlus addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source>
<translation>Voici vos adresses BitcoinPlus pour recevoir des paiements. Il est recommandé d'utiliser une nouvelle adresse de réception pour chaque transaction.</translation>
</message>
<message>
<source>&Copy Address</source>
<translation>&Copier l'adresse</translation>
</message>
<message>
<source>Copy &Label</source>
<translation>Copier l'é&tiquette</translation>
</message>
<message>
<source>&Edit</source>
<translation>&Modifier</translation>
</message>
<message>
<source>Export Address List</source>
<translation>Exporter la liste d'adresses</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>Valeurs séparées par des virgules (*.csv)</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>Échec d'exportation</translation>
</message>
<message>
<source>There was an error trying to save the address list to %1. Please try again.</source>
<translation>Une erreur est survenue lors de l'enregistrement de la liste d'adresses vers %1. Veuillez ressayer plus tard.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<source>(no label)</source>
<translation>(aucune étiquette)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<source>Passphrase Dialog</source>
<translation>Fenêtre de dialogue de la phrase de passe</translation>
</message>
<message>
<source>Enter passphrase</source>
<translation>Saisir la phrase de passe</translation>
</message>
<message>
<source>New passphrase</source>
<translation>Nouvelle phrase de passe</translation>
</message>
<message>
<source>Repeat new passphrase</source>
<translation>Répéter la phrase de passe</translation>
</message>
<message>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Saisissez la nouvelle phrase de passe du porte-monnaie.<br/>Veuillez utiliser une phrase de passe composée de <b>dix caractères aléatoires ou plus</b>, ou de <b>huit mots ou plus</b>.</translation>
</message>
<message>
<source>Encrypt wallet</source>
<translation>Chiffrer le porte-monnaie</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Cette opération nécessite votre phrase de passe pour déverrouiller le porte-monnaie.</translation>
</message>
<message>
<source>Unlock wallet</source>
<translation>Déverrouiller le porte-monnaie</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Cette opération nécessite votre phrase de passe pour déchiffrer le porte-monnaie.</translation>
</message>
<message>
<source>Decrypt wallet</source>
<translation>Déchiffrer le porte-monnaie</translation>
</message>
<message>
<source>Change passphrase</source>
<translation>Changer la phrase de passe</translation>
</message>
<message>
<source>Enter the old passphrase and new passphrase to the wallet.</source>
<translation>Saisir l'ancienne puis la nouvelle phrase de passe du porte-monnaie.</translation>
</message>
<message>
<source>Confirm wallet encryption</source>
<translation>Confirmer le chiffrement du porte-monnaie</translation>
</message>
<message>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR BITCOINPLUS</b>!</source>
<translation>Avertissement : si vous chiffrez votre porte-monnaie et perdez votre phrase de passe, vous <b>PERDREZ TOUS VOS BITCOINPLUS</b> !</translation>
</message>
<message>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Voulez-vous vraiment chiffrer votre porte-monnaie ?</translation>
</message>
<message>
<source>Wallet encrypted</source>
<translation>Le porte-monnaie est chiffré</translation>
</message>
<message>
<source>%1 will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bitcoinplus from being stolen by malware infecting your computer.</source>
<translation>%1 va maintenant se fermer pour terminer le processus de chiffrement. Souvenez-vous que le chiffrement de votre porte-monnaie ne peut pas protéger entièrement vos bitcoinplus contre le vol par des logiciels malveillants qui infecteraient votre ordinateur.</translation>
</message>
<message>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANT : toutes les sauvegardes précédentes du fichier de votre porte-monnaie devraient être remplacées par le fichier du porte-monnaie chiffré nouvellement généré. Pour des raisons de sécurité, les sauvegardes précédentes de votre fichier de porte-monnaie non chiffré deviendront inutilisables dès que vous commencerez à utiliser le nouveau porte-monnaie chiffré.</translation>
</message>
<message>
<source>Wallet encryption failed</source>
<translation>Échec de chiffrement du porte-monnaie</translation>
</message>
<message>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Le chiffrement du porte-monnaie a échoué en raison d'une erreur interne. Votre porte-monnaie n'a pas été chiffré.</translation>
</message>
<message>
<source>The supplied passphrases do not match.</source>
<translation>Les phrases de passe saisies ne correspondent pas.</translation>
</message>
<message>
<source>Wallet unlock failed</source>
<translation>Échec de déverrouillage du porte-monnaie</translation>
</message>
<message>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La phrase de passe saisie pour déchiffrer le porte-monnaie était erronée.</translation>
</message>
<message>
<source>Wallet decryption failed</source>
<translation>Échec de déchiffrement du porte-monnaie</translation>
</message>
<message>
<source>Wallet passphrase was successfully changed.</source>
<translation>La phrase de passe du porte-monnaie a été modifiée avec succès.</translation>
</message>
<message>
<source>Warning: The Caps Lock key is on!</source>
<translation>Avertissement : la touche Verr. Maj. est activée !</translation>
</message>
</context>
<context>
<name>BanTableModel</name>
<message>
<source>IP/Netmask</source>
<translation>IP/masque réseau</translation>
</message>
<message>
<source>Banned Until</source>
<translation>Banni jusqu'au</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<source>Sign &message...</source>
<translation>Signer un &message...</translation>
</message>
<message>
<source>Synchronizing with network...</source>
<translation>Synchronisation avec le réseau…</translation>
</message>
<message>
<source>&Overview</source>
<translation>&Vue d'ensemble</translation>
</message>
<message>
<source>Node</source>
<translation>Nœud</translation>
</message>
<message>
<source>Show general overview of wallet</source>
<translation>Afficher une vue d’ensemble du porte-monnaie</translation>
</message>
<message>
<source>&Transactions</source>
<translation>&Transactions</translation>
</message>
<message>
<source>Browse transaction history</source>
<translation>Parcourir l'historique transactionnel</translation>
</message>
<message>
<source>E&xit</source>
<translation>Q&uitter</translation>
</message>
<message>
<source>Quit application</source>
<translation>Quitter l’application</translation>
</message>
<message>
<source>&About %1</source>
<translation>À &propos de %1</translation>
</message>
<message>
<source>Show information about %1</source>
<translation>Afficher des informations à propos de %1</translation>
</message>
<message>
<source>About &Qt</source>
<translation>À propos de &Qt</translation>
</message>
<message>
<source>Show information about Qt</source>
<translation>Afficher des informations sur Qt</translation>
</message>
<message>
<source>&Options...</source>
<translation>&Options…</translation>
</message>
<message>
<source>Modify configuration options for %1</source>
<translation>Modifier les options de configuration de %1</translation>
</message>
<message>
<source>&Encrypt Wallet...</source>
<translation>&Chiffrer le porte-monnaie...</translation>
</message>
<message>
<source>&Backup Wallet...</source>
<translation>Sauvegarder le &porte-monnaie...</translation>
</message>
<message>
<source>&Change Passphrase...</source>
<translation>&Changer la phrase de passe...</translation>
</message>
<message>
<source>&Sending addresses...</source>
<translation>Adresses d'&envoi...</translation>
</message>
<message>
<source>&Receiving addresses...</source>
<translation>Adresses de &réception...</translation>
</message>
<message>
<source>Open &URI...</source>
<translation>Ouvrir une &URI...</translation>
</message>
<message>
<source>Reindexing blocks on disk...</source>
<translation>Réindexation des blocs sur le disque...</translation>
</message>
<message>
<source>Send coins to a BitcoinPlus address</source>
<translation>Envoyer des pièces à une adresse BitcoinPlus</translation>
</message>
<message>
<source>Backup wallet to another location</source>
<translation>Sauvegarder le porte-monnaie vers un autre emplacement</translation>
</message>
<message>
<source>Change the passphrase used for wallet encryption</source>
<translation>Modifier la phrase de passe utilisée pour le chiffrement du porte-monnaie</translation>
</message>
<message>
<source>&Debug window</source>
<translation>Fenêtre de &débogage</translation>
</message>
<message>
<source>Open debugging and diagnostic console</source>
<translation>Ouvrir une console de débogage et de diagnostic</translation>
</message>
<message>
<source>&Verify message...</source>
<translation>&Vérifier un message...</translation>
</message>
<message>
<source>BitcoinPlus</source>
<translation>BitcoinPlus</translation>
</message>
<message>
<source>Wallet</source>
<translation>Porte-monnaie</translation>
</message>
<message>
<source>&Send</source>
<translation>&Envoyer</translation>
</message>
<message>
<source>&Receive</source>
<translation>&Recevoir</translation>
</message>
<message>
<source>&Show / Hide</source>
<translation>&Afficher / cacher</translation>
</message>
<message>
<source>Show or hide the main Window</source>
<translation>Afficher ou cacher la fenêtre principale</translation>
</message>
<message>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Chiffrer les clés privées qui appartiennent à votre porte-monnaie</translation>
</message>
<message>
<source>Sign messages with your BitcoinPlus addresses to prove you own them</source>
<translation>Signer les messages avec vos adresses BitcoinPlus pour prouver que vous les détenez</translation>
</message>
<message>
<source>Verify messages to ensure they were signed with specified BitcoinPlus addresses</source>
<translation>Vérifier les messages pour s'assurer qu'ils ont été signés avec les adresses BitcoinPlus spécifiées</translation>
</message>
<message>
<source>&File</source>
<translation>&Fichier</translation>
</message>
<message>
<source>&Settings</source>
<translation>&Paramètres</translation>
</message>
<message>
<source>&Help</source>
<translation>&Aide</translation>
</message>
<message>
<source>Tabs toolbar</source>
<translation>Barre d'outils des onglets</translation>
</message>
<message>
<source>Request payments (generates QR codes and bitcoinplus: URIs)</source>
<translation>Demander des paiements (génère des codes QR et des URI bitcoinplus:)</translation>
</message>
<message>
<source>Show the list of used sending addresses and labels</source>
<translation>Afficher la liste d'adresses d'envoi et d'étiquettes utilisées</translation>
</message>
<message>
<source>Show the list of used receiving addresses and labels</source>
<translation>Afficher la liste d'adresses de réception et d'étiquettes utilisées</translation>
</message>
<message>
<source>Open a bitcoinplus: URI or payment request</source>
<translation>Ouvrir une URI bitcoinplus: ou une demande de paiement</translation>
</message>
<message>
<source>&Command-line options</source>
<translation>Options de ligne de &commande</translation>
</message>
<message numerus="yes">
<source>%n active connection(s) to BitcoinPlus network</source>
<translation><numerusform>%n connexion active avec le réseau BitcoinPlus</numerusform><numerusform>%n connexions actives avec le réseau BitcoinPlus</numerusform></translation>
</message>
<message>
<source>Indexing blocks on disk...</source>
<translation>Indexation des blocs sur le disque...</translation>
</message>
<message>
<source>Processing blocks on disk...</source>
<translation>Traitement des blocs sur le disque...</translation>
</message>
<message>
<source>No block source available...</source>
<translation>Aucune source de blocs disponible...</translation>
</message>
<message numerus="yes">
<source>Processed %n block(s) of transaction history.</source>
<translation><numerusform>%n bloc d'historique transactionnel a été traité</numerusform><numerusform>%n blocs d'historique transactionnel ont été traités</numerusform></translation>
</message>
<message numerus="yes">
<source>%n hour(s)</source>
<translation><numerusform>%n heure</numerusform><numerusform>%n heures</numerusform></translation>
</message>
<message numerus="yes">
<source>%n day(s)</source>
<translation><numerusform>%n jour</numerusform><numerusform>%n jours</numerusform></translation>
</message>
<message numerus="yes">
<source>%n week(s)</source>
<translation><numerusform>%n semaine</numerusform><numerusform>%n semaines</numerusform></translation>
</message>
<message>
<source>%1 and %2</source>
<translation>%1 et %2</translation>
</message>
<message numerus="yes">
<source>%n year(s)</source>
<translation><numerusform>%n an</numerusform><numerusform>%n ans</numerusform></translation>
</message>
<message>
<source>%1 behind</source>
<translation>en retard de %1</translation>
</message>
<message>
<source>Last received block was generated %1 ago.</source>
<translation>Le dernier bloc reçu avait été généré il y a %1.</translation>
</message>
<message>
<source>Transactions after this will not yet be visible.</source>
<translation>Les transactions postérieures à celle-ci ne seront pas encore visibles.</translation>
</message>
<message>
<source>Error</source>
<translation>Erreur</translation>
</message>
<message>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<source>Information</source>
<translation>Information</translation>
</message>
<message>
<source>Up to date</source>
<translation>À jour</translation>
</message>
<message>
<source>Show the %1 help message to get a list with possible BitcoinPlus command-line options</source>
<translation>Afficher le message d'aide de %1 pour obtenir la liste des options de ligne de commande BitcoinPlus possibles.</translation>
</message>
<message>
<source>%1 client</source>
<translation>Client %1</translation>
</message>
<message>
<source>Catching up...</source>
<translation>Rattrapage…</translation>
</message>
<message>
<source>Date: %1
</source>
<translation>Date : %1
</translation>
</message>
<message>
<source>Amount: %1
</source>
<translation>Montant : %1
</translation>
</message>
<message>
<source>Type: %1
</source>
<translation>Type : %1
</translation>
</message>
<message>
<source>Label: %1
</source>
<translation>Étiquette : %1
</translation>
</message>
<message>
<source>Address: %1
</source>
<translation>Adresse : %1
</translation>
</message>
<message>
<source>Sent transaction</source>
<translation>Transaction envoyée</translation>
</message>
<message>
<source>Incoming transaction</source>
<translation>Transaction entrante</translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Le porte-monnaie est <b>chiffré</b> et est actuellement <b>déverrouillé</b></translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Le porte-monnaie est <b>chiffré</b> et actuellement <b>verrouillé</b></translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<source>Coin Selection</source>
<translation>Sélection des pièces</translation>
</message>
<message>
<source>Quantity:</source>
<translation>Quantité :</translation>
</message>
<message>
<source>Bytes:</source>
<translation>Octets :</translation>
</message>
<message>
<source>Amount:</source>
<translation>Montant :</translation>
</message>
<message>
<source>Priority:</source>
<translation>Priorité :</translation>
</message>
<message>
<source>Fee:</source>
<translation>Frais :</translation>
</message>
<message>
<source>Dust:</source>
<translation>Poussière :</translation>
</message>
<message>
<source>After Fee:</source>
<translation>Après les frais :</translation>
</message>
<message>
<source>Change:</source>
<translation>Monnaie :</translation>
</message>
<message>
<source>(un)select all</source>
<translation>Tout (des)sélectionner</translation>
</message>
<message>
<source>Tree mode</source>
<translation>Mode arborescence</translation>
</message>
<message>
<source>List mode</source>
<translation>Mode liste</translation>
</message>
<message>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<source>Received with label</source>
<translation>Reçu avec une étiquette</translation>
</message>
<message>
<source>Received with address</source>
<translation>Reçu avec une adresse</translation>
</message>
<message>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<source>Confirmations</source>
<translation>Confirmations</translation>
</message>
<message>
<source>Confirmed</source>
<translation>Confirmée</translation>
</message>
<message>
<source>Priority</source>
<translation>Priorité</translation>
</message>
<message>
<source>Copy address</source>
<translation>Copier l’adresse</translation>
</message>
<message>
<source>Copy label</source>
<translation>Copier l’étiquette</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copier le montant</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>Copier l'ID de la transaction</translation>
</message>
<message>
<source>Lock unspent</source>
<translation>Verrouiller les transactions non dépensées</translation>
</message>
<message>
<source>Unlock unspent</source>
<translation>Déverrouiller les transactions non dépensées</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>Copier la quantité</translation>
</message>
<message>
<source>Copy fee</source>
<translation>Copier les frais</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>Copier après les frais</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>Copier les octets</translation>
</message>
<message>
<source>Copy priority</source>
<translation>Copier la priorité</translation>
</message>
<message>
<source>Copy dust</source>
<translation>Copier la poussière</translation>
</message>
<message>
<source>Copy change</source>
<translation>Copier la monnaie</translation>
</message>
<message>
<source>highest</source>
<translation>la plus élevée</translation>
</message>
<message>
<source>higher</source>
<translation>plus élevée</translation>
</message>
<message>
<source>high</source>
<translation>élevée</translation>
</message>
<message>
<source>medium-high</source>
<translation>moyenne-élevée</translation>
</message>
<message>
<source>medium</source>
<translation>moyenne</translation>
</message>
<message>
<source>low-medium</source>
<translation>faible-moyenne</translation>
</message>
<message>
<source>low</source>
<translation>faible</translation>
</message>
<message>
<source>lower</source>
<translation>plus faible</translation>
</message>
<message>
<source>lowest</source>
<translation>la plus faible</translation>
</message>
<message>
<source>(%1 locked)</source>
<translation>(%1 verrouillée)</translation>
</message>
<message>
<source>none</source>
<translation>aucune</translation>
</message>
<message>
<source>yes</source>
<translation>oui</translation>
</message>
<message>
<source>no</source>
<translation>non</translation>
</message>
<message>
<source>This label turns red if the transaction size is greater than 1000 bytes.</source>
<translation>Cette étiquette devient rouge si la taille de la transaction dépasse 1 000 octets.</translation>
</message>
<message>
<source>This means a fee of at least %1 per kB is required.</source>
<translation>Cela signifie que des frais d'au moins %1 sont exigés par Ko.</translation>
</message>
<message>
<source>Can vary +/- 1 byte per input.</source>
<translation>Peut varier +/- 1 octet par entrée.</translation>
</message>
<message>
<source>Transactions with higher priority are more likely to get included into a block.</source>
<translation>Les transactions à priorité élevée ont plus de chances d'être incluses dans un bloc.</translation>
</message>
<message>
<source>This label turns red if the priority is smaller than "medium".</source>
<translation>Cette étiquette devient rouge si la priorité est plus basse que « moyenne ».</translation>
</message>
<message>
<source>This label turns red if any recipient receives an amount smaller than the current dust threshold.</source>
<translation>Cette étiquette devient rouge si un destinataire reçoit un montant inférieur au seuil actuel de poussière.</translation>
</message>
<message>
<source>Can vary +/- %1 satoshi(s) per input.</source>
<translation>Peut varier +/- %1 satoshi(s) par entrée.</translation>
</message>
<message>
<source>(no label)</source>
<translation>(aucune étiquette)</translation>
</message>
<message>
<source>change from %1 (%2)</source>
<translation>monnaie de %1 (%2)</translation>
</message>
<message>
<source>(change)</source>
<translation>(monnaie)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<source>Edit Address</source>
<translation>Modifier l'adresse</translation>
</message>
<message>
<source>&Label</source>
<translation>É&tiquette</translation>
</message>
<message>
<source>The label associated with this address list entry</source>
<translation>L'étiquette associée à cette entrée de la liste d'adresses</translation>
</message>
<message>
<source>The address associated with this address list entry. This can only be modified for sending addresses.</source>
<translation>L'adresse associée à cette entrée de la liste d'adresses. Cela ne peut être modifié que pour les adresses d'envoi.</translation>
</message>
<message>
<source>&Address</source>
<translation>&Adresse</translation>
</message>
<message>
<source>New receiving address</source>
<translation>Nouvelle adresse de réception</translation>
</message>
<message>
<source>New sending address</source>
<translation>Nouvelle adresse d’envoi</translation>
</message>
<message>
<source>Edit receiving address</source>
<translation>Modifier l’adresse de réception</translation>
</message>
<message>
<source>Edit sending address</source>
<translation>Modifier l’adresse d'envoi</translation>
</message>
<message>
<source>The entered address "%1" is not a valid BitcoinPlus address.</source>
<translation>L'adresse saisie « %1 » n'est pas une adresse BitcoinPlus valide.</translation>
</message>
<message>
<source>The entered address "%1" is already in the address book.</source>
<translation>L’adresse saisie « %1 » est déjà présente dans le carnet d'adresses.</translation>
</message>
<message>
<source>Could not unlock wallet.</source>
<translation>Impossible de déverrouiller le porte-monnaie.</translation>
</message>
<message>
<source>New key generation failed.</source>
<translation>Échec de génération de la nouvelle clé.</translation>
</message>
</context>
<context>
<name>FreespaceChecker</name>
<message>
<source>A new data directory will be created.</source>
<translation>Un nouveau répertoire de données sera créé.</translation>
</message>
<message>
<source>name</source>
<translation>nom</translation>
</message>
<message>
<source>Directory already exists. Add %1 if you intend to create a new directory here.</source>
<translation>Le répertoire existe déjà. Ajouter %1 si vous comptez créer un nouveau répertoire ici.</translation>
</message>
<message>
<source>Path already exists, and is not a directory.</source>
<translation>Le chemin existe déjà et n'est pas un répertoire.</translation>
</message>
<message>
<source>Cannot create data directory here.</source>
<translation>Impossible de créer un répertoire de données ici.</translation>
</message>
</context>
<context>
<name>HelpMessageDialog</name>
<message>
<source>version</source>
<translation>version</translation>
</message>
<message>
<source>(%1-bit)</source>
<translation>(%1-bit)</translation>
</message>
<message>
<source>About %1</source>
<translation>À propos de %1</translation>
</message>
<message>
<source>Command-line options</source>
<translation>Options de ligne de commande</translation>
</message>
<message>
<source>Usage:</source>
<translation>Utilisation :</translation>
</message>
<message>
<source>command-line options</source>
<translation>options de ligne de commande</translation>
</message>
<message>
<source>UI Options:</source>
<translation>Options de l'IU :</translation>
</message>
<message>
<source>Choose data directory on startup (default: %u)</source>
<translation>Choisir un répertoire de données au démarrage (par défaut : %u)</translation>
</message>
<message>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Définir la langue, par exemple « fr_CA » (par défaut : la langue du système)</translation>
</message>
<message>
<source>Start minimized</source>
<translation>Démarrer minimisé</translation>
</message>
<message>
<source>Set SSL root certificates for payment request (default: -system-)</source>
<translation>Définir les certificats SSL racine pour les requêtes de paiement (par défaut : -system-)</translation>
</message>
<message>
<source>Show splash screen on startup (default: %u)</source>
<translation>Afficher l'écran d'accueil au démarrage (par défaut : %u)</translation>
</message>
<message>
<source>Reset all settings changed in the GUI</source>
<translation>Réinitialiser tous les paramètres changés dans l'IUG</translation>
</message>
</context>
<context>
<name>Intro</name>
<message>
<source>Welcome</source>
<translation>Bienvenue</translation>
</message>
<message>
<source>Welcome to %1.</source>
<translation>Bienvenue à %1.</translation>
</message>
<message>
<source>As this is the first time the program is launched, you can choose where %1 will store its data.</source>
<translation>Puisque c'est la première fois que le logiciel est lancé, vous pouvez choisir où %1 stockera ses données.</translation>
</message>
<message>
<source>%1 will download and store a copy of the BitcoinPlus block chain. At least %2GB of data will be stored in this directory, and it will grow over time. The wallet will also be stored in this directory.</source>
<translation>%1 téléchargera et stockera une copie de la chaîne de blocs de BitcoinPlus. Au moins %2 Go de données seront stockés dans ce répertoire et sa taille augmentera avec le temps. Le porte-monnaie sera également stocké dans ce répertoire.</translation>
</message>
<message>
<source>Use the default data directory</source>
<translation>Utiliser le répertoire de données par défaut</translation>
</message>
<message>
<source>Use a custom data directory:</source>
<translation>Utiliser un répertoire de données personnalisé :</translation>
</message>
<message>
<source>Error: Specified data directory "%1" cannot be created.</source>
<translation>Erreur : le répertoire de données spécifié « %1 » ne peut pas être créé.</translation>
</message>
<message>
<source>Error</source>
<translation>Erreur</translation>
</message>
<message numerus="yes">
<source>%n GB of free space available</source>
<translation><numerusform>%n Go d'espace libre disponible</numerusform><numerusform>%n Go d'espace libre disponibles</numerusform></translation>
</message>
<message numerus="yes">
<source>(of %n GB needed)</source>
<translation><numerusform>(sur %n Go requis)</numerusform><numerusform>(sur %n Go requis)</numerusform></translation>
</message>
</context>
<context>
<name>OpenURIDialog</name>
<message>
<source>Open URI</source>
<translation>Ouvrir une URI</translation>
</message>
<message>
<source>Open payment request from URI or file</source>
<translation>Ouvrir une demande de paiement à partir d'une URI ou d'un fichier</translation>
</message>
<message>
<source>URI:</source>
<translation>URI :</translation>
</message>
<message>
<source>Select payment request file</source>
<translation>Choisir le fichier de demande de paiement</translation>
</message>
<message>
<source>Select payment request file to open</source>
<translation>Choisir le fichier de demande de paiement à ouvrir</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<source>Options</source>
<translation>Options</translation>
</message>
<message>
<source>&Main</source>
<translation>&Principaux</translation>
</message>
<message>
<source>Automatically start %1 after logging in to the system.</source>
<translation>Démarrer %1 automatiquement après avoir ouvert une session sur l'ordinateur.</translation>
</message>
<message>
<source>&Start %1 on system login</source>
<translation>&Démarrer %1 lors de l'ouverture d'une session</translation>
</message>
<message>
<source>Size of &database cache</source>
<translation>Taille du cache de la base de &données</translation>
</message>
<message>
<source>MB</source>
<translation>Mo</translation>
</message>
<message>
<source>Number of script &verification threads</source>
<translation>Nombre de fils de &vérification de script</translation>
</message>
<message>
<source>Accept connections from outside</source>
<translation>Accepter les connexions provenant de l'extérieur</translation>
</message>
<message>
<source>Allow incoming connections</source>
<translation>Permettre les connexions entrantes</translation>
</message>
<message>
<source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source>
<translation>Adresse IP du mandataire (p. ex. IPv4 : 127.0.0.1 / IPv6 : ::1)</translation>
</message>
<message>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Exit in the menu.</source>
<translation>Minimiser au lieu de quitter l'application lorsque la fenêtre est fermée. Si cette option est activée, l'application ne sera fermée qu'en sélectionnant Quitter dans le menu.</translation>
</message>
<message>
<source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. Multiple URLs are separated by vertical bar |.</source>
<translation>URL de tiers (p. ex. un explorateur de blocs) apparaissant dans l'onglet des transactions comme éléments du menu contextuel. %s dans l'URL est remplacé par le hachage de la transaction. Les URL multiples sont séparées par une barre verticale |.</translation>
</message>
<message>
<source>Third party transaction URLs</source>
<translation>URL de transaction d'un tiers</translation>
</message>
<message>
<source>Active command-line options that override above options:</source>
<translation>Options actives de ligne de commande qui annulent les options ci-dessus :</translation>
</message>
<message>
<source>Reset all client options to default.</source>
<translation>Réinitialiser toutes les options du client aux valeurs par défaut.</translation>
</message>
<message>
<source>&Reset Options</source>
<translation>&Réinitialiser les options</translation>
</message>
<message>
<source>&Network</source>
<translation>&Réseau</translation>
</message>
<message>
<source>(0 = auto, <0 = leave that many cores free)</source>
<translation>(0 = auto, < 0 = laisser ce nombre de cœurs inutilisés)</translation>
</message>
<message>
<source>W&allet</source>
<translation>&Porte-monnaie</translation>
</message>
<message>
<source>Expert</source>
<translation>Expert</translation>
</message>
<message>
<source>Enable coin &control features</source>
<translation>Activer les fonctions de &contrôle des pièces</translation>
</message>
<message>
<source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source>
<translation>Si vous désactivez la dépense de la monnaie non confirmée, la monnaie d'une transaction ne peut pas être utilisée tant que cette transaction n'a pas reçu au moins une confirmation. Cela affecte aussi le calcul de votre solde.</translation>
</message>
<message>
<source>&Spend unconfirmed change</source>
<translation>&Dépenser la monnaie non confirmée</translation>
</message>
<message>
<source>Automatically open the BitcoinPlus client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Ouvrir automatiquement le port du client BitcoinPlus sur le routeur. Cela ne fonctionne que si votre routeur prend en charge l'UPnP et si la fonction est activée.</translation>
</message>
<message>
<source>Map port using &UPnP</source>
<translation>Mapper le port avec l'&UPnP</translation>
</message>
<message>
<source>Connect to the BitcoinPlus network through a SOCKS5 proxy.</source>
<translation>Se connecter au réseau BitcoinPlus par un mandataire SOCKS5.</translation>
</message>
<message>
<source>&Connect through SOCKS5 proxy (default proxy):</source>
<translation>Se &connecter par un mandataire SOCKS5 (mandataire par défaut) :</translation>
</message>
<message>
<source>Proxy &IP:</source>
<translation>&IP du mandataire :</translation>
</message>
<message>
<source>&Port:</source>
<translation>&Port :</translation>
</message>
<message>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Port du mandataire (p. ex. 9050)</translation>
</message>
<message>
<source>Used for reaching peers via:</source>
<translation>Utilisé pour rejoindre les pairs par :</translation>
</message>
<message>
<source>Shows, if the supplied default SOCKS5 proxy is used to reach peers via this network type.</source>
<translation>S'affiche, si le mandataire SOCKS5 par défaut fourni est utilisé pour atteindre les pairs par ce type de réseau.</translation>
</message>
<message>
<source>IPv4</source>
<translation>IPv4</translation>
</message>
<message>
<source>IPv6</source>
<translation>IPv6</translation>
</message>
<message>
<source>Tor</source>
<translation>Tor</translation>
</message>
<message>
<source>Connect to the BitcoinPlus network through a separate SOCKS5 proxy for Tor hidden services.</source>
<translation>Se connecter au réseau BitcoinPlus au travers d'un mandataire SOCKS5 séparé pour les services cachés de Tor.</translation>
</message>
<message>
<source>Use separate SOCKS5 proxy to reach peers via Tor hidden services:</source>
<translation>Utiliser un mandataire SOCKS5 séparé pour atteindre les pairs grâce aux services cachés de Tor :</translation>
</message>
<message>
<source>&Window</source>
<translation>&Fenêtre</translation>
</message>
<message>
<source>&Hide the icon from the system tray.</source>
<translation>&Cacher l'icône dans la zone de notification.</translation>
</message>
<message>
<source>Hide tray icon</source>
<translation>Cacher l'icône de la zone de notification</translation>
</message>
<message>
<source>Show only a tray icon after minimizing the window.</source>
<translation>N'afficher qu'une icône dans la zone de notification après minimisation.</translation>
</message>
<message>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimiser dans la zone de notification au lieu de la barre des tâches</translation>
</message>
<message>
<source>M&inimize on close</source>
<translation>M&inimiser lors de la fermeture</translation>
</message>
<message>
<source>&Display</source>
<translation>&Affichage</translation>
</message>
<message>
<source>User Interface &language:</source>
<translation>&Langue de l'interface utilisateur :</translation>
</message>
<message>
<source>The user interface language can be set here. This setting will take effect after restarting %1.</source>
<translation>La langue de l'interface utilisateur peut être définie ici. Ce réglage sera pris en compte après redémarrage de %1.</translation>
</message>
<message>
<source>&Unit to show amounts in:</source>
<translation>&Unité d'affichage des montants :</translation>
</message>
<message>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Choisir la sous-unité par défaut d'affichage dans l'interface et lors d'envoi de pièces.</translation>
</message>
<message>
<source>Whether to show coin control features or not.</source>
<translation>Afficher ou non les fonctions de contrôle des pièces.</translation>
</message>
<message>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<source>&Cancel</source>
<translation>A&nnuler</translation>
</message>
<message>
<source>default</source>
<translation>par défaut</translation>
</message>
<message>
<source>none</source>
<translation>aucune</translation>
</message>
<message>
<source>Confirm options reset</source>
<translation>Confirmer la réinitialisation des options</translation>
</message>
<message>
<source>Client restart required to activate changes.</source>
<translation>Le redémarrage du client est exigé pour activer les changements.</translation>
</message>
<message>
<source>Client will be shut down. Do you want to proceed?</source>
<translation>Le client sera arrêté. Voulez-vous continuer ?</translation>
</message>
<message>
<source>This change would require a client restart.</source>
<translation>Ce changement demanderait un redémarrage du client.</translation>
</message>
<message>
<source>The supplied proxy address is invalid.</source>
<translation>L'adresse de serveur mandataire fournie est invalide.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<source>Form</source>
<translation>Formulaire</translation>
</message>
<message>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the BitcoinPlus network after a connection is established, but this process has not completed yet.</source>
<translation>Les informations affichées peuvent être obsolètes. Votre porte-monnaie est automatiquement synchronisé avec le réseau BitcoinPlus lorsque la connexion s'établit, or ce processus n'est pas encore terminé.</translation>
</message>
<message>
<source>Watch-only:</source>
<translation>Juste-regarder :</translation>
</message>
<message>
<source>Available:</source>
<translation>Disponible :</translation>
</message>
<message>
<source>Your current spendable balance</source>
<translation>Votre solde actuel disponible</translation>
</message>
<message>
<source>Pending:</source>
<translation>En attente :</translation>
</message>
<message>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source>
<translation>Total des transactions qui doivent encore être confirmées et qui ne sont pas prises en compte dans le solde disponible</translation>
</message>
<message>
<source>Immature:</source>
<translation>Immature :</translation>
</message>
<message>
<source>Mined balance that has not yet matured</source>
<translation>Le solde miné qui n'est pas encore mûr</translation>
</message>
<message>
<source>Balances</source>
<translation>Soldes</translation>
</message>
<message>
<source>Total:</source>
<translation>Total :</translation>
</message>
<message>
<source>Your current total balance</source>
<translation>Votre solde total actuel</translation>
</message>
<message>
<source>Your current balance in watch-only addresses</source>
<translation>Votre solde actuel en adresses juste-regarder</translation>
</message>
<message>
<source>Spendable:</source>
<translation>Disponible :</translation>
</message>
<message>
<source>Recent transactions</source>
<translation>Transactions récentes</translation>
</message>
<message>
<source>Unconfirmed transactions to watch-only addresses</source>
<translation>Transactions non confirmées vers des adresses juste-regarder</translation>
</message>
<message>
<source>Mined balance in watch-only addresses that has not yet matured</source>
<translation>Le solde miné dans des adresses juste-regarder, qui n'est pas encore mûr</translation>
</message>
<message>
<source>Current total balance in watch-only addresses</source>
<translation>Solde total actuel dans des adresses juste-regarder</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<source>Payment request error</source>
<translation>Erreur de demande de paiement</translation>
</message>
<message>
<source>Cannot start bitcoinplus: click-to-pay handler</source>
<translation>Impossible de démarrer le gestionnaire de cliquer-pour-payer bitcoinplus:</translation>
</message>
<message>
<source>URI handling</source>
<translation>Gestion des URI</translation>
</message>
<message>
<source>Payment request fetch URL is invalid: %1</source>
<translation>L'URL de récupération de la demande de paiement est invalide : %1</translation>
</message>
<message>
<source>Invalid payment address %1</source>
<translation>Adresse de paiement invalide %1</translation>
</message>
<message>
<source>URI cannot be parsed! This can be caused by an invalid BitcoinPlus address or malformed URI parameters.</source>
<translation>L'URI ne peut pas être analysée ! Cela peut être causé par une adresse BitcoinPlus invalide ou par des paramètres d'URI mal formés.</translation>
</message>
<message>
<source>Payment request file handling</source>
<translation>Gestion des fichiers de demande de paiement</translation>
</message>
<message>
<source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source>
<translation>Le fichier de demande de paiement ne peut pas être lu ! Cela peut être causé par un fichier de demande de paiement invalide.</translation>
</message>
<message>
<source>Payment request rejected</source>
<translation>Demande de paiement rejetée</translation>
</message>
<message>
<source>Payment request network doesn't match client network.</source>
<translation>Le réseau de la demande de paiement ne correspond pas au réseau du client.</translation>
</message>
<message>
<source>Payment request expired.</source>
<translation>La demande de paiement a expiré.</translation>
</message>
<message>
<source>Payment request is not initialized.</source>
<translation>La demande de paiement n'est pas initialisée.</translation>
</message>
<message>
<source>Unverified payment requests to custom payment scripts are unsupported.</source>
<translation>Les demandes de paiements non vérifiées vers des scripts de paiement personnalisés ne sont pas prises en charge.</translation>
</message>
<message>
<source>Invalid payment request.</source>
<translation>Demande de paiement invalide.</translation>
</message>
<message>
<source>Requested payment amount of %1 is too small (considered dust).</source>
<translation>Le paiement demandé d'un montant de %1 est trop faible (considéré comme de la poussière).</translation>
</message>
<message>
<source>Refund from %1</source>
<translation>Remboursement de %1</translation>
</message>
<message>
<source>Payment request %1 is too large (%2 bytes, allowed %3 bytes).</source>
<translation>La demande de paiement %1 est trop grande (%2 octets, %3 octets permis).</translation>
</message>
<message>
<source>Error communicating with %1: %2</source>
<translation>Erreur de communication avec %1 : %2</translation>
</message>
<message>
<source>Payment request cannot be parsed!</source>
<translation>La demande de paiement ne peut pas être analysée !</translation>
</message>
<message>
<source>Bad response from server %1</source>
<translation>Mauvaise réponse du serveur %1</translation>
</message>
<message>
<source>Network request error</source>
<translation>Erreur de demande réseau</translation>
</message>
<message>
<source>Payment acknowledged</source>
<translation>Le paiement a été confirmé</translation>
</message>
</context>
<context>
<name>PeerTableModel</name>
<message>
<source>User Agent</source>
<translation>Agent utilisateur</translation>
</message>
<message>
<source>Node/Service</source>
<translation>Nœud/service</translation>
</message>
<message>
<source>Ping Time</source>
<translation>Temps de ping</translation>
</message>
</context>
<context>
<name>QObject</name>
<message>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<source>Enter a BitcoinPlus address (e.g. %1)</source>
<translation>Saisir une adresse BitcoinPlus (p. ex. %1)</translation>
</message>
<message>
<source>%1 d</source>
<translation>%1 j</translation>
</message>
<message>
<source>%1 h</source>
<translation>%1 h</translation>
</message>
<message>
<source>%1 m</source>
<translation>%1 min</translation>
</message>
<message>
<source>%1 s</source>
<translation>%1 s</translation>
</message>
<message>
<source>None</source>
<translation>Aucun</translation>
</message>
<message>
<source>N/A</source>
<translation>N.D.</translation>
</message>
<message>
<source>%1 ms</source>
<translation>%1 ms</translation>
</message>
</context>
<context>
<name>QRImageWidget</name>
<message>
<source>&Save Image...</source>
<translation>&Enregistrer l'image...</translation>
</message>
<message>
<source>&Copy Image</source>
<translation>&Copier l'image</translation>
</message>
<message>
<source>Save QR Code</source>
<translation>Enregistrer le code QR</translation>
</message>
<message>
<source>PNG Image (*.png)</source>
<translation>Image PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<source>N/A</source>
<translation>N.D.</translation>
</message>
<message>
<source>Client version</source>
<translation>Version du client</translation>
</message>
<message>
<source>&Information</source>
<translation>&Informations</translation>
</message>
<message>
<source>Debug window</source>
<translation>Fenêtre de débogage</translation>
</message>
<message>
<source>General</source>
<translation>Général</translation>
</message>
<message>
<source>Using BerkeleyDB version</source>
<translation>Version BerkeleyDB utilisée</translation>
</message>
<message>
<source>Datadir</source>
<translation>Datadir</translation>
</message>
<message>
<source>Startup time</source>
<translation>Heure de démarrage</translation>
</message>
<message>
<source>Network</source>
<translation>Réseau</translation>
</message>
<message>
<source>Name</source>
<translation>Nom</translation>
</message>
<message>
<source>Number of connections</source>
<translation>Nombre de connexions</translation>
</message>
<message>
<source>Block chain</source>
<translation>Chaîne de blocs</translation>
</message>
<message>
<source>Current number of blocks</source>
<translation>Nombre actuel de blocs</translation>
</message>
<message>
<source>Memory Pool</source>
<translation>Réserve de mémoire</translation>
</message>
<message>
<source>Current number of transactions</source>
<translation>Nombre actuel de transactions</translation>
</message>
<message>
<source>Memory usage</source>
<translation>Utilisation de la mémoire</translation>
</message>
<message>
<source>Received</source>
<translation>Reçu</translation>
</message>
<message>
<source>Sent</source>
<translation>Envoyé</translation>
</message>
<message>
<source>&Peers</source>
<translation>&Pairs</translation>
</message>
<message>
<source>Banned peers</source>
<translation>Pairs bannis</translation>
</message>
<message>
<source>Select a peer to view detailed information.</source>
<translation>Choisir un pair pour voir l'information détaillée.</translation>
</message>
<message>
<source>Whitelisted</source>
<translation>Dans la liste blanche</translation>
</message>
<message>
<source>Direction</source>
<translation>Direction</translation>
</message>
<message>
<source>Version</source>
<translation>Version</translation>
</message>
<message>
<source>Starting Block</source>
<translation>Bloc de départ</translation>
</message>
<message>
<source>Synced Headers</source>
<translation>En-têtes synchronisés</translation>
</message>
<message>
<source>Synced Blocks</source>
<translation>Blocs synchronisés</translation>
</message>
<message>
<source>User Agent</source>
<translation>Agent utilisateur</translation>
</message>
<message>
<source>Open the %1 debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Ouvrir le fichier journal de débogage de %1 à partir du répertoire de données actuel. Cela peut prendre quelques secondes pour les fichiers journaux de grande taille.</translation>
</message>
<message>
<source>Decrease font size</source>
<translation>Diminuer la taille de police</translation>
</message>
<message>
<source>Increase font size</source>
<translation>Augmenter la taille de police</translation>
</message>
<message>
<source>Services</source>
<translation>Services</translation>
</message>
<message>
<source>Ban Score</source>
<translation>Pointage des bannissements</translation>
</message>
<message>
<source>Connection Time</source>
<translation>Temps de connexion</translation>
</message>
<message>
<source>Last Send</source>
<translation>Dernier envoi</translation>
</message>
<message>
<source>Last Receive</source>
<translation>Dernière réception</translation>
</message>
<message>
<source>Ping Time</source>
<translation>Temps de ping</translation>
</message>
<message>
<source>The duration of a currently outstanding ping.</source>
<translation>La durée d'un ping en cours.</translation>
</message>
<message>
<source>Ping Wait</source>
<translation>Attente du ping</translation>
</message>
<message>
<source>Time Offset</source>
<translation>Décalage temporel</translation>
</message>
<message>
<source>Last block time</source>
<translation>Horodatage du dernier bloc</translation>
</message>
<message>
<source>&Open</source>
<translation>&Ouvrir</translation>
</message>
<message>
<source>&Console</source>
<translation>&Console</translation>
</message>
<message>
<source>&Network Traffic</source>
<translation>Trafic &réseau</translation>
</message>
<message>
<source>&Clear</source>
<translation>&Effacer</translation>
</message>
<message>
<source>Totals</source>
<translation>Totaux</translation>
</message>
<message>
<source>In:</source>
<translation>Entrant :</translation>
</message>
<message>
<source>Out:</source>
<translation>Sortant :</translation>
</message>
<message>
<source>Debug log file</source>
<translation>Fichier journal de débogage</translation>
</message>
<message>
<source>Clear console</source>
<translation>Effacer la console</translation>
</message>
<message>
<source>&Disconnect Node</source>
<translation>&Déconnecter le nœud</translation>
</message>
<message>
<source>Ban Node for</source>
<translation>Bannir le nœud pendant</translation>
</message>
<message>
<source>1 &hour</source>
<translation>1 &heure</translation>
</message>
<message>
<source>1 &day</source>
<translation>1 &jour</translation>
</message>
<message>
<source>1 &week</source>
<translation>1 &semaine</translation>
</message>
<message>
<source>1 &year</source>
<translation>1 &an</translation>
</message>
<message>
<source>&Unban Node</source>
<translation>&Réhabiliter le nœud</translation>
</message>
<message>
<source>Welcome to the %1 RPC console.</source>
<translation>Bienvenue sur la console RPC de %1.</translation>
</message>
<message>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Utiliser les flèches haut et bas pour naviguer dans l'historique et <b>Ctrl-L</b> pour effacer l'écran.</translation>
</message>
<message>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Taper <b>help</b> pour afficher une vue générale des commandes proposées.</translation>
</message>
<message>
<source>%1 B</source>
<translation>%1 o</translation>
</message>
<message>
<source>%1 KB</source>
<translation>%1 Ko</translation>
</message>
<message>
<source>%1 MB</source>
<translation>%1 Mo</translation>
</message>
<message>
<source>%1 GB</source>
<translation>%1 Go</translation>
</message>
<message>
<source>(node id: %1)</source>
<translation>(ID de nœud : %1)</translation>
</message>
<message>
<source>via %1</source>
<translation>par %1</translation>
</message>
<message>
<source>never</source>
<translation>jamais</translation>
</message>
<message>
<source>Inbound</source>
<translation>Entrant</translation>
</message>
<message>
<source>Outbound</source>
<translation>Sortant</translation>
</message>
<message>
<source>Yes</source>
<translation>Oui</translation>
</message>
<message>
<source>No</source>
<translation>Non</translation>
</message>
<message>
<source>Unknown</source>
<translation>Inconnu</translation>
</message>
</context>
<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>&Amount:</source>
<translation>&Montant :</translation>
</message>
<message>
<source>&Label:</source>
<translation>&Étiquette :</translation>
</message>
<message>
<source>&Message:</source>
<translation>M&essage :</translation>
</message>
<message>
<source>Reuse one of the previously used receiving addresses. Reusing addresses has security and privacy issues. Do not use this unless re-generating a payment request made before.</source>
<translation>Réutiliser une adresse de réception utilisée précédemment. Réutiliser une adresse comporte des problèmes de sécurité et de confidentialité. À ne pas utiliser, sauf pour générer de nouveau une demande de paiement faite au préalable.</translation>
</message>
<message>
<source>R&euse an existing receiving address (not recommended)</source>
<translation>Ré&utiliser une adresse de réception existante (non recommandé)</translation>
</message>
<message>
<source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the BitcoinPlus network.</source>
<translation>Un message facultatif à joindre à la demande de paiement et qui sera affiché à l'ouverture de celle-ci. Note : le message ne sera pas envoyé avec le paiement par le réseau BitcoinPlus.</translation>
</message>
<message>
<source>An optional label to associate with the new receiving address.</source>
<translation>Une étiquette facultative à associer à la nouvelle adresse de réception.</translation>
</message>
<message>
<source>Use this form to request payments. All fields are <b>optional</b>.</source>
<translation>Utiliser ce formulaire pour demander des paiements. Tous les champs sont <b>facultatifs</b>.</translation>
</message>
<message>
<source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source>
<translation>Un montant facultatif à demander. Ne rien saisir, ou saisir zéro, pour ne pas demander de montant spécifique.</translation>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation>Effacer tous les champs du formulaire.</translation>
</message>
<message>
<source>Clear</source>
<translation>Effacer</translation>
</message>
<message>
<source>Requested payments history</source>
<translation>Historique des paiements demandés</translation>
</message>
<message>
<source>&Request payment</source>
<translation>&Demander un paiement</translation>
</message>
<message>
<source>Show the selected request (does the same as double clicking an entry)</source>
<translation>Afficher la demande choisie (comme double-cliquer sur une entrée)</translation>
</message>
<message>
<source>Show</source>
<translation>Afficher</translation>
</message>
<message>
<source>Remove the selected entries from the list</source>
<translation>Retirer les entrées sélectionnées de la liste</translation>
</message>
<message>
<source>Remove</source>
<translation>Retirer</translation>
</message>
<message>
<source>Copy label</source>
<translation>Copier l’étiquette</translation>
</message>
<message>
<source>Copy message</source>
<translation>Copier le message</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copier le montant</translation>
</message>
</context>
<context>
<name>ReceiveRequestDialog</name>
<message>
<source>QR Code</source>
<translation>Code QR</translation>
</message>
<message>
<source>Copy &URI</source>
<translation>Copier l'&URI</translation>
</message>
<message>
<source>Copy &Address</source>
<translation>Copier l'&adresse</translation>
</message>
<message>
<source>&Save Image...</source>
<translation>&Enregistrer l'image...</translation>
</message>
<message>
<source>Request payment to %1</source>
<translation>Demande de paiement à %1</translation>
</message>
<message>
<source>Payment information</source>
<translation>Informations de paiement</translation>
</message>
<message>
<source>URI</source>
<translation>URI</translation>
</message>
<message>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<source>Message</source>
<translation>Message</translation>
</message>
<message>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>L'URI résultante est trop longue. Essayez de réduire le texte de l'étiquette ou du message.</translation>
</message>
<message>
<source>Error encoding URI into QR Code.</source>
<translation>Erreur d'encodage de l'URI en code QR.</translation>
</message>
</context>
<context>
<name>RecentRequestsTableModel</name>
<message>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<source>Message</source>
<translation>Message</translation>
</message>
<message>
<source>(no label)</source>
<translation>(aucune étiquette)</translation>
</message>
<message>
<source>(no message)</source>
<translation>(aucun message)</translation>
</message>
<message>
<source>(no amount requested)</source>
<translation>(aucun montant demandé)</translation>
</message>
<message>
<source>Requested</source>
<translation>Demandée</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<source>Send Coins</source>
<translation>Envoyer des pièces</translation>
</message>
<message>
<source>Coin Control Features</source>
<translation>Fonctions de contrôle des pièces</translation>
</message>
<message>
<source>Inputs...</source>
<translation>Entrées...</translation>
</message>
<message>
<source>automatically selected</source>
<translation>choisi automatiquement</translation>
</message>
<message>
<source>Insufficient funds!</source>
<translation>Fonds insuffisants !</translation>
</message>
<message>
<source>Quantity:</source>
<translation>Quantité :</translation>
</message>
<message>
<source>Bytes:</source>
<translation>Octets :</translation>
</message>
<message>
<source>Amount:</source>
<translation>Montant :</translation>
</message>
<message>
<source>Priority:</source>
<translation>Priorité :</translation>
</message>
<message>
<source>Fee:</source>
<translation>Frais :</translation>
</message>
<message>
<source>After Fee:</source>
<translation>Après les frais :</translation>
</message>
<message>
<source>Change:</source>
<translation>Monnaie :</translation>
</message>
<message>
<source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source>
<translation>Si cette option est activée mais que l'adresse de monnaie rendue est vide ou invalide, la monnaie sera envoyée vers une adresse nouvellement générée.</translation>
</message>
<message>
<source>Custom change address</source>
<translation>Adresse personnalisée de monnaie rendue</translation>
</message>
<message>
<source>Transaction Fee:</source>
<translation>Frais de transaction :</translation>
</message>
<message>
<source>Choose...</source>
<translation>Choisir...</translation>
</message>
<message>
<source>collapse fee-settings</source>
<translation>réduire les paramètres des frais</translation>
</message>
<message>
<source>per kilobyte</source>
<translation>par kilo-octet</translation>
</message>
<message>
<source>If the custom fee is set to 1000 satoshis and the transaction is only 250 bytes, then "per kilobyte" only pays 250 satoshis in fee, while "total at least" pays 1000 satoshis. For transactions bigger than a kilobyte both pay by kilobyte.</source>
<translation>Si les frais personnalisés sont définis à 1 000 satoshis et que la transaction est seulement de 250 octets, donc le « par kilo-octet » ne paiera que 250 satoshis de frais, alors que le « total au moins » paiera 1 000 satoshis. Pour des transactions supérieures à un kilo-octet, les deux paieront par kilo-octets.</translation>
</message>
<message>
<source>Hide</source>
<translation>Cacher</translation>
</message>
<message>
<source>total at least</source>
<translation>total au moins</translation>
</message>
<message>
<source>Paying only the minimum fee is just fine as long as there is less transaction volume than space in the blocks. But be aware that this can end up in a never confirming transaction once there is more demand for bitcoinplus transactions than the network can process.</source>
<translation>Il est correct de payer les frais minimum tant que le volume transactionnel est inférieur à l'espace dans les blocs. Mais soyez conscient que cela pourrait résulter en une transaction n'étant jamais confirmée une fois qu'il y aura plus de transactions que le réseau ne pourra en traiter.</translation>
</message>
<message>
<source>(read the tooltip)</source>
<translation>(lire l'infobulle)</translation>
</message>
<message>
<source>Recommended:</source>
<translation>Recommandés :</translation>
</message>
<message>
<source>Custom:</source>
<translation>Personnalisés :</translation>
</message>
<message>
<source>(Smart fee not initialized yet. This usually takes a few blocks...)</source>
<translation>(Les frais intelligents ne sont pas encore initialisés. Cela prend habituellement quelques blocs...)</translation>
</message>
<message>
<source>Confirmation time:</source>
<translation>Temps de confirmation :</translation>
</message>
<message>
<source>normal</source>
<translation>normal</translation>
</message>
<message>
<source>fast</source>
<translation>rapide</translation>
</message>
<message>
<source>Send to multiple recipients at once</source>
<translation>Envoyer à plusieurs destinataires à la fois</translation>
</message>
<message>
<source>Add &Recipient</source>
<translation>Ajouter un &destinataire</translation>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation>Effacer tous les champs du formulaire.</translation>
</message>
<message>
<source>Dust:</source>
<translation>Poussière :</translation>
</message>
<message>
<source>Clear &All</source>
<translation>&Tout effacer</translation>
</message>
<message>
<source>Balance:</source>
<translation>Solde :</translation>
</message>
<message>
<source>Confirm the send action</source>
<translation>Confirmer l’action d'envoi</translation>
</message>
<message>
<source>S&end</source>
<translation>E&nvoyer</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>Copier la quantité</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copier le montant</translation>
</message>
<message>
<source>Copy fee</source>
<translation>Copier les frais</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>Copier après les frais</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>Copier les octets</translation>
</message>
<message>
<source>Copy priority</source>
<translation>Copier la priorité</translation>
</message>
<message>
<source>Copy dust</source>
<translation>Copier la poussière</translation>
</message>
<message>
<source>Copy change</source>
<translation>Copier la monnaie</translation>
</message>
<message>
<source>%1 to %2</source>
<translation>%1 à %2</translation>
</message>
<message>
<source>Are you sure you want to send?</source>
<translation>Voulez-vous vraiment envoyer ?</translation>
</message>
<message>
<source>added as transaction fee</source>
<translation>ajoutés comme frais de transaction</translation>
</message>
<message>
<source>Total Amount %1</source>
<translation>Montant total %1</translation>
</message>
<message>
<source>or</source>
<translation>ou</translation>
</message>
<message>
<source>Confirm send coins</source>
<translation>Confirmer l’envoi de pièces</translation>
</message>
<message>
<source>The recipient address is not valid. Please recheck.</source>
<translation>L'adresse du destinataire est invalide. Veuillez la revérifier.</translation>
</message>
<message>
<source>The amount to pay must be larger than 0.</source>
<translation>Le montant à payer doit être supérieur à 0.</translation>
</message>
<message>
<source>The amount exceeds your balance.</source>
<translation>Le montant dépasse votre solde.</translation>
</message>
<message>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Le montant dépasse votre solde lorsque les frais de transaction de %1 sont inclus.</translation>
</message>
<message>
<source>Duplicate address found: addresses should only be used once each.</source>
<translation>Adresse identique trouvée : chaque adresse ne devrait être utilisée qu'une fois.</translation>
</message>
<message>
<source>Transaction creation failed!</source>
<translation>Échec de création de la transaction !</translation>
</message>
<message>
<source>The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>La transaction a été rejetée ! Cela peut arriver si certaines pièces de votre porte-monnaie étaient déjà dépensées, par exemple si vous avez utilisé une copie de wallet.dat et que des pièces ont été dépensées dans la copie sans être marquées comme telles ici.</translation>
</message>
<message>
<source>A fee higher than %1 is considered an absurdly high fee.</source>
<translation>Des frais supérieurs à %1 sont considérés comme ridiculement élevés.</translation>
</message>
<message>
<source>Payment request expired.</source>
<translation>La demande de paiement a expiré.</translation>
</message>
<message>
<source>Pay only the required fee of %1</source>
<translation>Payer seulement les frais exigés de %1</translation>
</message>
<message>
<source>Warning: Invalid BitcoinPlus address</source>
<translation>Avertissement : adresse BitcoinPlus invalide</translation>
</message>
<message>
<source>Warning: Unknown change address</source>
<translation>Avertissement : adresse de monnaie rendue inconnue</translation>
</message>
<message>
<source>(no label)</source>
<translation>(aucune étiquette)</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<source>A&mount:</source>
<translation>&Montant :</translation>
</message>
<message>
<source>Pay &To:</source>
<translation>&Payer à :</translation>
</message>
<message>
<source>&Label:</source>
<translation>É&tiquette :</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>Choisir une adresse déjà utilisée</translation>
</message>
<message>
<source>This is a normal payment.</source>
<translation>Ceci est un paiement normal.</translation>
</message>
<message>
<source>The BitcoinPlus address to send the payment to</source>
<translation>L'adresse BitcoinPlus à laquelle envoyer le paiement</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>Coller l'adresse du presse-papiers</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Remove this entry</source>
<translation>Retirer cette entrée</translation>
</message>
<message>
<source>The fee will be deducted from the amount being sent. The recipient will receive less bitcoinplus than you enter in the amount field. If multiple recipients are selected, the fee is split equally.</source>
<translation>Les frais seront déduits du montant envoyé. Le destinataire recevra moins de bitcoinplus que le montant saisi dans le champ de montant. Si plusieurs destinataires sont sélectionnés, les frais seront partagés également.</translation>
</message>
<message>
<source>S&ubtract fee from amount</source>
<translation>S&oustraire les frais du montant</translation>
</message>
<message>
<source>Message:</source>
<translation>Message :</translation>
</message>
<message>
<source>This is an unauthenticated payment request.</source>
<translation>Cette demande de paiement n'est pas authentifiée.</translation>
</message>
<message>
<source>This is an authenticated payment request.</source>
<translation>Cette demande de paiement est authentifiée.</translation>
</message>
<message>
<source>Enter a label for this address to add it to the list of used addresses</source>
<translation>Saisir une étiquette pour cette adresse afin de l'ajouter à la liste d'adresses utilisées</translation>
</message>
<message>
<source>A message that was attached to the bitcoinplus: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the BitcoinPlus network.</source>
<translation>Un message qui était joint à l'URI bitcoinplus: et qui sera stocké avec la transaction pour référence. Note : ce message ne sera pas envoyé par le réseau BitcoinPlus.</translation>
</message>
<message>
<source>Pay To:</source>
<translation>Payer à :</translation>
</message>
<message>
<source>Memo:</source>
<translation>Mémo :</translation>
</message>
<message>
<source>Enter a label for this address to add it to your address book</source>
<translation>Saisir une étiquette pour cette adresse afin de l’ajouter à votre carnet d’adresses</translation>
</message>
</context>
<context>
<name>SendConfirmationDialog</name>
<message>
<source>Yes</source>
<translation>Oui</translation>
</message>
</context>
<context>
<name>ShutdownWindow</name>
<message>
<source>%1 is shutting down...</source>
<translation>Arrêt de %1...</translation>
</message>
<message>
<source>Do not shut down the computer until this window disappears.</source>
<translation>Ne pas fermer l'ordinateur jusqu'à la disparition de cette fenêtre.</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signatures - Signer / vérifier un message</translation>
</message>
<message>
<source>&Sign Message</source>
<translation>&Signer un message</translation>
</message>
<message>
<source>You can sign messages/agreements with your addresses to prove you can receive bitcoinplus sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Vous pouvez signer des messages ou des accords avec vos adresses pour prouver que vous pouvez recevoir des bitcoinplus à ces dernières. Faites attention de ne rien signer de vague ou au hasard, car des attaques d'hameçonnage pourraient essayer de vous faire signer avec votre identité afin de l'usurper. Ne signez que des déclarations entièrement détaillées et avec lesquelles vous êtes d'accord.</translation>
</message>
<message>
<source>The BitcoinPlus address to sign the message with</source>
<translation>L'adresse BitcoinPlus avec laquelle signer le message</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>Choisir une adresse déjà utilisée</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>Coller une adresse du presse-papiers</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Enter the message you want to sign here</source>
<translation>Saisir ici le message que vous désirez signer</translation>
</message>
<message>
<source>Signature</source>
<translation>Signature</translation>
</message>
<message>
<source>Copy the current signature to the system clipboard</source>
<translation>Copier la signature actuelle dans le presse-papiers</translation>
</message>
<message>
<source>Sign the message to prove you own this BitcoinPlus address</source>
<translation>Signer le message afin de prouver que vous détenez cette adresse BitcoinPlus</translation>
</message>
<message>
<source>Sign &Message</source>
<translation>Signer le &message</translation>
</message>
<message>
<source>Reset all sign message fields</source>
<translation>Réinitialiser tous les champs de signature de message</translation>
</message>
<message>
<source>Clear &All</source>
<translation>&Tout effacer</translation>
</message>
<message>
<source>&Verify Message</source>
<translation>&Vérifier un message</translation>
</message>
<message>
<source>Enter the receiver's address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack. Note that this only proves the signing party receives with the address, it cannot prove sendership of any transaction!</source>
<translation>Saisir ci-dessous l'adresse du destinataire, le message (s'assurer de copier fidèlement les retours à la ligne, les espaces, les tabulations, etc.) et la signature pour vérifier le message. Faire attention à ne pas déduire davantage de la signature que ce qui est contenu dans le message signé même, pour éviter d'être trompé par une attaque d'homme du milieu. Prendre en compte que cela ne fait que prouver que le signataire reçoit avec l'adresse et ne peut pas prouver la provenance d'une transaction !</translation>
</message>
<message>
<source>The BitcoinPlus address the message was signed with</source>
<translation>L'adresse BitcoinPlus avec laquelle le message a été signé</translation>
</message>
<message>
<source>Verify the message to ensure it was signed with the specified BitcoinPlus address</source>
<translation>Vérifier le message pour s'assurer qu'il a été signé avec l'adresse BitcoinPlus spécifiée</translation>
</message>
<message>
<source>Verify &Message</source>
<translation>Vérifier le &message</translation>
</message>
<message>
<source>Reset all verify message fields</source>
<translation>Réinitialiser tous les champs de vérification de message</translation>
</message>
<message>
<source>Click "Sign Message" to generate signature</source>
<translation>Cliquez sur « Signer le message » pour générer la signature</translation>
</message>
<message>
<source>The entered address is invalid.</source>
<translation>L'adresse saisie est invalide.</translation>
</message>
<message>
<source>Please check the address and try again.</source>
<translation>Veuillez vérifier l'adresse et ressayer.</translation>
</message>
<message>
<source>The entered address does not refer to a key.</source>
<translation>L'adresse saisie ne fait pas référence à une clé.</translation>
</message>
<message>
<source>Wallet unlock was cancelled.</source>
<translation>Le déverrouillage du porte-monnaie a été annulé.</translation>
</message>
<message>
<source>Private key for the entered address is not available.</source>
<translation>La clé privée n'est pas disponible pour l'adresse saisie.</translation>
</message>
<message>
<source>Message signing failed.</source>
<translation>Échec de signature du message.</translation>
</message>
<message>
<source>Message signed.</source>
<translation>Le message a été signé.</translation>
</message>
<message>
<source>The signature could not be decoded.</source>
<translation>La signature n'a pu être décodée.</translation>
</message>
<message>
<source>Please check the signature and try again.</source>
<translation>Veuillez vérifier la signature et ressayer.</translation>
</message>
<message>
<source>The signature did not match the message digest.</source>
<translation>La signature ne correspond pas au condensé du message.</translation>
</message>
<message>
<source>Message verification failed.</source>
<translation>Échec de vérification du message.</translation>
</message>
<message>
<source>Message verified.</source>
<translation>Le message a été vérifié.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<source>KB/s</source>
<translation>Ko/s</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message numerus="yes">
<source>Open for %n more block(s)</source>
<translation><numerusform>Ouvert pour %n bloc de plus</numerusform><numerusform>Ouvert pour %n blocs de plus</numerusform></translation>
</message>
<message>
<source>Open until %1</source>
<translation>Ouvert jusqu'à %1</translation>
</message>
<message>
<source>conflicted with a transaction with %1 confirmations</source>
<translation>est en conflit avec une transaction ayant %1 confirmations</translation>
</message>
<message>
<source>%1/offline</source>
<translation>%1/hors ligne</translation>
</message>
<message>
<source>0/unconfirmed, %1</source>
<translation>0/non confirmée, %1</translation>
</message>
<message>
<source>in memory pool</source>
<translation>dans la réserve de mémoire</translation>
</message>
<message>
<source>not in memory pool</source>
<translation>pas dans la réserve de mémoire</translation>
</message>
<message>
<source>abandoned</source>
<translation>abandonnée</translation>
</message>
<message>
<source>%1/unconfirmed</source>
<translation>%1/non confirmée</translation>
</message>
<message>
<source>%1 confirmations</source>
<translation>%1 confirmations</translation>
</message>
<message>
<source>Status</source>
<translation>État</translation>
</message>
<message>
<source>, has not been successfully broadcast yet</source>
<translation>, n’a pas encore été diffusée avec succès</translation>
</message>
<message numerus="yes">
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, diffusée à travers %n nœud</numerusform><numerusform>, diffusée à travers %n nœuds</numerusform></translation>
</message>
<message>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<source>Source</source>
<translation>Source</translation>
</message>
<message>
<source>Generated</source>
<translation>Générée</translation>
</message>
<message>
<source>From</source>
<translation>De</translation>
</message>
<message>
<source>unknown</source>
<translation>inconnue</translation>
</message>
<message>
<source>To</source>
<translation>À</translation>
</message>
<message>
<source>own address</source>
<translation>votre adresse</translation>
</message>
<message>
<source>watch-only</source>
<translation>juste-regarder</translation>
</message>
<message>
<source>label</source>
<translation>étiquette</translation>
</message>
<message>
<source>Credit</source>
<translation>Crédit</translation>
</message>
<message numerus="yes">
<source>matures in %n more block(s)</source>
<translation><numerusform>arrive à maturité dans %n bloc de plus</numerusform><numerusform>arrive à maturité dans %n blocs de plus</numerusform></translation>
</message>
<message>
<source>not accepted</source>
<translation>refusée</translation>
</message>
<message>
<source>Debit</source>
<translation>Débit</translation>
</message>
<message>
<source>Total debit</source>
<translation>Débit total</translation>
</message>
<message>
<source>Total credit</source>
<translation>Crédit total</translation>
</message>
<message>
<source>Transaction fee</source>
<translation>Frais de transaction</translation>
</message>
<message>
<source>Net amount</source>
<translation>Montant net</translation>
</message>
<message>
<source>Message</source>
<translation>Message</translation>
</message>
<message>
<source>Comment</source>
<translation>Commentaire</translation>
</message>
<message>
<source>Transaction ID</source>
<translation>ID de la transaction</translation>
</message>
<message>
<source>Output index</source>
<translation>Index de sortie</translation>
</message>
<message>
<source>Merchant</source>
<translation>Marchand</translation>
</message>
<message>
<source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Les pièces générées doivent mûrir pendant %1 blocs avant de pouvoir être dépensées. Lorsque ce bloc a été généré, il a été diffusé sur le réseau pour être ajouté à la chaîne de blocs. Si son intégration à la chaîne échoue, son état sera modifié en « refusée » et il ne sera pas possible de le dépenser. Cela peut arriver occasionnellement si un autre nœud génère un bloc à quelques secondes du vôtre.</translation>
</message>
<message>
<source>Debug information</source>
<translation>Informations de débogage</translation>
</message>
<message>
<source>Transaction</source>
<translation>Transaction</translation>
</message>
<message>
<source>Inputs</source>
<translation>Entrées</translation>
</message>
<message>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<source>true</source>
<translation>vrai</translation>
</message>
<message>
<source>false</source>
<translation>faux</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<source>This pane shows a detailed description of the transaction</source>
<translation>Ce panneau affiche une description détaillée de la transaction</translation>
</message>
<message>
<source>Details for %1</source>
<translation>Détails de %1</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message numerus="yes">
<source>Open for %n more block(s)</source>
<translation><numerusform>Ouvert pour %n bloc de plus</numerusform><numerusform>Ouvert pour %n blocs de plus</numerusform></translation>
</message>
<message><|fim▁hole|> <translation>Ouvert jusqu'à %1</translation>
</message>
<message>
<source>Offline</source>
<translation>Hors ligne</translation>
</message>
<message>
<source>Unconfirmed</source>
<translation>Non confirmée</translation>
</message>
<message>
<source>Abandoned</source>
<translation>Abandonnée</translation>
</message>
<message>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation>Confirmation (%1 sur %2 confirmations recommandées)</translation>
</message>
<message>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmée (%1 confirmations)</translation>
</message>
<message>
<source>Conflicted</source>
<translation>En conflit</translation>
</message>
<message>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation>Immature (%1 confirmations, sera disponible après %2)</translation>
</message>
<message>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Ce bloc n’a été reçu par aucun autre nœud et ne sera probablement pas accepté !</translation>
</message>
<message>
<source>Generated but not accepted</source>
<translation>Générée mais refusée</translation>
</message>
<message>
<source>Received with</source>
<translation>Reçue avec</translation>
</message>
<message>
<source>Received from</source>
<translation>Reçue de</translation>
</message>
<message>
<source>Sent to</source>
<translation>Envoyée à</translation>
</message>
<message>
<source>Payment to yourself</source>
<translation>Paiement à vous-même</translation>
</message>
<message>
<source>Mined</source>
<translation>Miné</translation>
</message>
<message>
<source>watch-only</source>
<translation>juste-regarder</translation>
</message>
<message>
<source>(n/a)</source>
<translation>(n.d.)</translation>
</message>
<message>
<source>(no label)</source>
<translation>(aucune étiquette)</translation>
</message>
<message>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>État de la transaction. Survoler ce champ avec la souris pour afficher le nombre de confirmations.</translation>
</message>
<message>
<source>Date and time that the transaction was received.</source>
<translation>Date et heure de réception de la transaction.</translation>
</message>
<message>
<source>Type of transaction.</source>
<translation>Type de transaction.</translation>
</message>
<message>
<source>Whether or not a watch-only address is involved in this transaction.</source>
<translation>Indique si une adresse juste-regarder est impliquée ou non dans cette transaction.</translation>
</message>
<message>
<source>User-defined intent/purpose of the transaction.</source>
<translation>Intention/but de la transaction défini par l'utilisateur.</translation>
</message>
<message>
<source>Amount removed from or added to balance.</source>
<translation>Montant ajouté au solde ou soustrait de celui-ci.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<source>All</source>
<translation>Toutes</translation>
</message>
<message>
<source>Today</source>
<translation>Aujourd’hui</translation>
</message>
<message>
<source>This week</source>
<translation>Cette semaine</translation>
</message>
<message>
<source>This month</source>
<translation>Ce mois</translation>
</message>
<message>
<source>Last month</source>
<translation>Le mois dernier</translation>
</message>
<message>
<source>This year</source>
<translation>Cette année</translation>
</message>
<message>
<source>Range...</source>
<translation>Plage…</translation>
</message>
<message>
<source>Received with</source>
<translation>Reçue avec</translation>
</message>
<message>
<source>Sent to</source>
<translation>Envoyée à</translation>
</message>
<message>
<source>To yourself</source>
<translation>À vous-même</translation>
</message>
<message>
<source>Mined</source>
<translation>Miné</translation>
</message>
<message>
<source>Other</source>
<translation>Autres</translation>
</message>
<message>
<source>Enter address or label to search</source>
<translation>Saisir une adresse ou une étiquette à rechercher</translation>
</message>
<message>
<source>Min amount</source>
<translation>Montant min.</translation>
</message>
<message>
<source>Abandon transaction</source>
<translation>Abandonner la transaction</translation>
</message>
<message>
<source>Copy address</source>
<translation>Copier l’adresse</translation>
</message>
<message>
<source>Copy label</source>
<translation>Copier l’étiquette</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copier le montant</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>Copier l'ID de la transaction</translation>
</message>
<message>
<source>Copy raw transaction</source>
<translation>Copier la transaction brute</translation>
</message>
<message>
<source>Copy full transaction details</source>
<translation>Copier tous les détails de la transaction</translation>
</message>
<message>
<source>Edit label</source>
<translation>Modifier l’étiquette</translation>
</message>
<message>
<source>Show transaction details</source>
<translation>Afficher les détails de la transaction</translation>
</message>
<message>
<source>Export Transaction History</source>
<translation>Exporter l'historique transactionnel</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>Valeurs séparées par des virgules (*.csv)</translation>
</message>
<message>
<source>Confirmed</source>
<translation>Confirmée</translation>
</message>
<message>
<source>Watch-only</source>
<translation>Juste-regarder</translation>
</message>
<message>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>Échec d'exportation</translation>
</message>
<message>
<source>There was an error trying to save the transaction history to %1.</source>
<translation>Une erreur est survenue lors de l'enregistrement de l'historique transactionnel vers %1.</translation>
</message>
<message>
<source>Exporting Successful</source>
<translation>L'exportation est réussie</translation>
</message>
<message>
<source>The transaction history was successfully saved to %1.</source>
<translation>L'historique transactionnel a été enregistré avec succès vers %1.</translation>
</message>
<message>
<source>Range:</source>
<translation>Plage :</translation>
</message>
<message>
<source>to</source>
<translation>à</translation>
</message>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
<message>
<source>Unit to show amounts in. Click to select another unit.</source>
<translation>Unité d'affichage des montants. Cliquer pour choisir une autre unité.</translation>
</message>
</context>
<context>
<name>WalletFrame</name>
<message>
<source>No wallet has been loaded.</source>
<translation>Aucun porte-monnaie n'a été chargé.</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<source>Send Coins</source>
<translation>Envoyer des pièces</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<source>&Export</source>
<translation>&Exporter</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>Exporter les données de l'onglet actuel vers un fichier</translation>
</message>
<message>
<source>Backup Wallet</source>
<translation>Sauvegarder le porte-monnaie</translation>
</message>
<message>
<source>Wallet Data (*.dat)</source>
<translation>Données du porte-monnaie (*.dat)</translation>
</message>
<message>
<source>Backup Failed</source>
<translation>Échec de la sauvegarde</translation>
</message>
<message>
<source>There was an error trying to save the wallet data to %1.</source>
<translation>Une erreur est survenue lors de l'enregistrement des données du porte-monnaie vers %1.</translation>
</message>
<message>
<source>Backup Successful</source>
<translation>La sauvegarde est réussie</translation>
</message>
<message>
<source>The wallet data was successfully saved to %1.</source>
<translation>Les données du porte-monnaie ont été enregistrées avec succès vers %1.</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<source>Options:</source>
<translation>Options :</translation>
</message>
<message>
<source>Specify data directory</source>
<translation>Spécifier le répertoire de données</translation>
</message>
<message>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Se connecter à un nœud pour obtenir des adresses de pairs puis se déconnecter</translation>
</message>
<message>
<source>Specify your own public address</source>
<translation>Spécifier votre propre adresse publique</translation>
</message>
<message>
<source>Accept command line and JSON-RPC commands</source>
<translation>Accepter les commandes JSON-RPC et en ligne de commande</translation>
</message>
<message>
<source>If <category> is not supplied or if <category> = 1, output all debugging information.</source>
<translation>Si <category> n'est pas indiqué ou si <category> = 1, produire toutes les informations de débogage.</translation>
</message>
<message>
<source>Prune configured below the minimum of %d MiB. Please use a higher number.</source>
<translation>L'élagage est configuré au-dessous du minimum de %d Mio. Veuillez utiliser un nombre plus élevé.</translation>
</message>
<message>
<source>Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)</source>
<translation>Élagage : la dernière synchronisation de porte-monnaie va par-delà les données élaguées. Vous devez -reindex (réindexer, télécharger de nouveau toute la chaîne de blocs en cas de nœud élagué)</translation>
</message>
<message>
<source>Reduce storage requirements by pruning (deleting) old blocks. This mode is incompatible with -txindex and -rescan. Warning: Reverting this setting requires re-downloading the entire blockchain. (default: 0 = disable pruning blocks, >%u = target size in MiB to use for block files)</source>
<translation>Réduire les exigences de stockage en élaguant (supprimant) les anciens blocs. Ce mode est incompatible avec -txindex et -rescan. Avertissement : ramener ce paramètre à sa valeur antérieure exige un nouveau téléchargement de la chaîne de blocs en entier (par défaut : 0 = désactiver l'élagage des blocs, >%u = taille cible en Mio à utiliser pour les fichiers de blocs).</translation>
</message>
<message>
<source>Rescans are not possible in pruned mode. You will need to use -reindex which will download the whole blockchain again.</source>
<translation>Les rebalayages sont impossibles en mode élagage. Vous devrez utiliser -reindex, ce qui téléchargera de nouveau la chaîne de blocs en entier.</translation>
</message>
<message>
<source>Error: A fatal internal error occurred, see debug.log for details</source>
<translation>Erreur : une erreur interne fatale s'est produite. Voir debug.log pour plus de détails</translation>
</message>
<message>
<source>Fee (in %s/kB) to add to transactions you send (default: %s)</source>
<translation>Les frais (en %s/ko) à ajouter aux transactions que vous envoyez (par défaut : %s)</translation>
</message>
<message>
<source>Pruning blockstore...</source>
<translation>Élagage du magasin de blocs...</translation>
</message>
<message>
<source>Run in the background as a daemon and accept commands</source>
<translation>Fonctionner en arrière-plan en tant que démon et accepter les commandes</translation>
</message>
<message>
<source>Unable to start HTTP server. See debug log for details.</source>
<translation>Impossible de démarrer le serveur HTTP. Voir le journal de débogage pour plus de détails.</translation>
</message>
<message>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Accepter les connexions entrantes (par défaut : 1 si aucun -proxy ou -connect)</translation>
</message>
<message>
<source>BitcoinPlus Core</source>
<translation>BitcoinPlus Core</translation>
</message>
<message>
<source>-fallbackfee is set very high! This is the transaction fee you may pay when fee estimates are not available.</source>
<translation>La valeur -fallbackfee est très élevée ! Elle représente les frais de transaction que vous pourriez acquitter si aucune estimation de frais n'est proposée.</translation>
</message>
<message>
<source>A fee rate (in %s/kB) that will be used when fee estimation has insufficient data (default: %s)</source>
<translation>Un taux de frais (en %s/ko) qui sera utilisé si l'estimation de frais ne possède pas suffisamment de données (par défaut : %s)</translation>
</message>
<message>
<source>Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)</source>
<translation>Accepter les transactions relayées reçues de pairs de la liste blanche même si le nœud ne relaie pas les transactions (par défaut : %d)</translation>
</message>
<message>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Se lier à l'adresse donnée et toujours l'écouter. Utiliser la notation [host]:port pour l'IPv6</translation>
</message>
<message>
<source>Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup</source>
<translation>Supprimer toutes les transactions du porte-monnaie et ne récupérer que ces parties de la chaîne de blocs avec -rescan au démarrage</translation>
</message>
<message>
<source>Distributed under the MIT software license, see the accompanying file COPYING or <http://www.opensource.org/licenses/mit-license.php>.</source>
<translation>Distribué sous la licence MIT d'utilisation d'un logiciel. Consultez le fichier joint COPYING ou <http://www.opensource.org/licenses/mit-license.php>.</translation>
</message>
<message>
<source>Equivalent bytes per sigop in transactions for relay and mining (default: %u)</source>
<translation>Octets équivalents par sigop dans les transactions pour relayer et miner (par défaut : %u)</translation>
</message>
<message>
<source>Error loading %s: You can't enable HD on a already existing non-HD wallet</source>
<translation>Erreur de chargement de %s : vous ne pouvez pas activer HD sur un porte-monnaie non HD existant</translation>
</message>
<message>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Exécuter la commande lorsqu'une transaction de porte-monnaie change (%s dans la commande est remplacée par TxID)</translation>
</message>
<message>
<source>Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)</source>
<translation>Réglage médian maximal autorisé de décalage de l'heure d'un pair. La perspective locale du temps peut être influencée par les pairs, en avance ou en retard, de cette valeur. (Par défaut : %u secondes)</translation>
</message>
<message>
<source>Maximum total fees (in %s) to use in a single wallet transaction or raw transaction; setting this too low may abort large transactions (default: %s)</source>
<translation>Frais totaux maximaux (en %s) à utiliser en une seule transaction de porte-monnaie ou transaction brute ; les définir trop bas pourrait interrompre les grosses transactions (par défaut : %s)</translation>
</message>
<message>
<source>Please contribute if you find %s useful. Visit %s for further information about the software.</source>
<translation>Si vous trouvez %s utile, vous pouvez y contribuer. Vous trouverez davantage d'informations à propos du logiciel sur %s.</translation>
</message>
<message>
<source>Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)</source>
<translation>Définir le nombre de fils de vérification des scripts (%u à %d, 0 = auto, < 0 = laisser ce nombre de cœurs inutilisés, par défaut : %d)</translation>
</message>
<message>
<source>The block database contains a block which appears to be from the future. This may be due to your computer's date and time being set incorrectly. Only rebuild the block database if you are sure that your computer's date and time are correct</source>
<translation>La base de données de blocs contient un bloc qui semble provenir du futur. Cela pourrait être causé par la date et l'heure erronées de votre ordinateur. Ne reconstruisez la base de données de blocs que si vous êtes certain que la date et l'heure de votre ordinateur sont justes.</translation>
</message>
<message>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>Ceci est une préversion de test - l'utiliser à vos risques - ne pas l'utiliser pour miner ou pour des applications marchandes</translation>
</message>
<message>
<source>Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain</source>
<translation>Impossible de rembobiner la base de données à un état préfourche. Vous devrez retélécharger la chaîne de blocs</translation>
</message>
<message>
<source>Use UPnP to map the listening port (default: 1 when listening and no -proxy)</source>
<translation>Utiliser l'UPnP pour mapper le port d'écoute (par défaut : 1 en écoute et sans -proxy)</translation>
</message>
<message>
<source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source>
<translation>Avertissement : le réseau ne semble pas totalement d'accord ! Quelques mineurs semblent éprouver des difficultés.</translation>
</message>
<message>
<source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Avertissement : nous ne semblons pas être en accord complet avec nos pairs ! Vous pourriez avoir besoin d'effectuer une mise à niveau, ou d'autres nœuds du réseau pourraient avoir besoin d'effectuer une mise à niveau.</translation>
</message>
<message>
<source>Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.</source>
<translation>Pairs de la liste blanche se connectant à partir du masque réseau ou de l'IP donné. Peut être spécifié plusieurs fois.</translation>
</message>
<message>
<source>You need to rebuild the database using -reindex-chainstate to change -txindex</source>
<translation>Vous devez reconstruire la base de données avec -reindex-chainstate pour changer -txindex</translation>
</message>
<message>
<source>%s corrupt, salvage failed</source>
<translation>%s corrompu, la récupération a échoué</translation>
</message>
<message>
<source>-maxmempool must be at least %d MB</source>
<translation>-maxmempool doit être d'au moins %d Mo</translation>
</message>
<message>
<source><category> can be:</source>
<translation><category> peut être :</translation>
</message>
<message>
<source>Append comment to the user agent string</source>
<translation>Ajouter un commentaire à la chaîne d'agent utilisateur</translation>
</message>
<message>
<source>Attempt to recover private keys from a corrupt wallet on startup</source>
<translation>Tenter de récupérer les clés privées d'un porte-monnaie corrompu lors du démarrage</translation>
</message>
<message>
<source>Block creation options:</source>
<translation>Options de création de blocs :</translation>
</message>
<message>
<source>Cannot resolve -%s address: '%s'</source>
<translation>Impossible de résoudre l'adresse -%s : « %s »</translation>
</message>
<message>
<source>Change index out of range</source>
<translation>L'index de changement est hors échelle</translation>
</message>
<message>
<source>Connect only to the specified node(s)</source>
<translation>Ne se connecter qu'au(x) nœud(s) spécifié(s)</translation>
</message>
<message>
<source>Connection options:</source>
<translation>Options de connexion :</translation>
</message>
<message>
<source>Copyright (C) %i-%i</source>
<translation>Tous droits réservés (C) %i-%i</translation>
</message>
<message>
<source>Corrupted block database detected</source>
<translation>Une base de données de blocs corrompue a été détectée</translation>
</message>
<message>
<source>Debugging/Testing options:</source>
<translation>Options de débogage/de test :</translation>
</message>
<message>
<source>Do not load the wallet and disable wallet RPC calls</source>
<translation>Ne pas charger le porte-monnaie et désactiver les appels RPC</translation>
</message>
<message>
<source>Do you want to rebuild the block database now?</source>
<translation>Voulez-vous reconstruire la base de données de blocs maintenant ?</translation>
</message>
<message>
<source>Enable publish hash block in <address></source>
<translation>Activer la publication du bloc de hachage dans <address></translation>
</message>
<message>
<source>Enable publish hash transaction in <address></source>
<translation>Activer la publication de la transaction de hachage dans <address></translation>
</message>
<message>
<source>Enable publish raw block in <address></source>
<translation>Activer la publication du bloc brut dans <address></translation>
</message>
<message>
<source>Enable publish raw transaction in <address></source>
<translation>Activer la publication de la transaction brute dans <address></translation>
</message>
<message>
<source>Error initializing block database</source>
<translation>Erreur d'initialisation de la base de données de blocs</translation>
</message>
<message>
<source>Error initializing wallet database environment %s!</source>
<translation>Erreur d'initialisation de l'environnement de la base de données du porte-monnaie %s !</translation>
</message>
<message>
<source>Error loading %s</source>
<translation>Erreur de chargement de %s</translation>
</message>
<message>
<source>Error loading %s: Wallet corrupted</source>
<translation>Erreur de chargement de %s : porte-monnaie corrompu</translation>
</message>
<message>
<source>Error loading %s: Wallet requires newer version of %s</source>
<translation>Erreur de chargement de %s : le porte-monnaie exige une version plus récente de %s</translation>
</message>
<message>
<source>Error loading %s: You can't disable HD on a already existing HD wallet</source>
<translation>Erreur de chargement de %s : vous ne pouvez pas désactiver HD sur un porte-monnaie HD existant</translation>
</message>
<message>
<source>Error loading block database</source>
<translation>Erreur de chargement de la base de données de blocs</translation>
</message>
<message>
<source>Error opening block database</source>
<translation>Erreur d'ouverture de la base de données de blocs</translation>
</message>
<message>
<source>Error: Disk space is low!</source>
<translation>Erreur : l'espace disque est faible !</translation>
</message>
<message>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Échec d'écoute sur un port quelconque. Utiliser -listen=0 si vous le voulez.</translation>
</message>
<message>
<source>Importing...</source>
<translation>Importation...</translation>
</message>
<message>
<source>Incorrect or no genesis block found. Wrong datadir for network?</source>
<translation>Bloc de genèse incorrect ou introuvable. Mauvais datadir pour le réseau ?</translation>
</message>
<message>
<source>Invalid -onion address: '%s'</source>
<translation>Adresse -onion invalide : « %s »</translation>
</message>
<message>
<source>Invalid amount for -%s=<amount>: '%s'</source>
<translation>Montant invalide pour -%s=<amount> : « %s »</translation>
</message>
<message>
<source>Invalid amount for -fallbackfee=<amount>: '%s'</source>
<translation>Montant invalide pour -fallbackfee=<amount> : « %s »</translation>
</message>
<message>
<source>Keep the transaction memory pool below <n> megabytes (default: %u)</source>
<translation>Garder la réserve de mémoire transactionnelle sous <n> mégaoctets (par défaut : %u)</translation>
</message>
<message>
<source>Loading banlist...</source>
<translation>Chargement de la liste d'interdiction...</translation>
</message>
<message>
<source>Location of the auth cookie (default: data dir)</source>
<translation>Emplacement du fichier témoin auth (par défaut : répertoire de données)</translation>
</message>
<message>
<source>Not enough file descriptors available.</source>
<translation>Pas assez de descripteurs de fichiers disponibles.</translation>
</message>
<message>
<source>Only connect to nodes in network <net> (ipv4, ipv6 or onion)</source>
<translation>Seulement se connecter aux nœuds du réseau <net> (IPv4, IPv6 ou oignon)</translation>
</message>
<message>
<source>Print this help message and exit</source>
<translation>Imprimer ce message d'aide et quitter</translation>
</message>
<message>
<source>Print version and exit</source>
<translation>Imprimer la version et quitter</translation>
</message>
<message>
<source>Prune cannot be configured with a negative value.</source>
<translation>L'élagage ne peut pas être configuré avec une valeur négative.</translation>
</message>
<message>
<source>Prune mode is incompatible with -txindex.</source>
<translation>Le mode élagage n'est pas compatible avec -txindex.</translation>
</message>
<message>
<source>Rebuild chain state and block index from the blk*.dat files on disk</source>
<translation>Reconstruire l'état de la chaîne et l'index des blocs à partir des fichiers blk*.dat sur le disque</translation>
</message>
<message>
<source>Rebuild chain state from the currently indexed blocks</source>
<translation>Reconstruire l'état de la chaîne à partir des blocs indexés actuellement</translation>
</message>
<message>
<source>Rewinding blocks...</source>
<translation>Rebobinage des blocs...</translation>
</message>
<message>
<source>Set database cache size in megabytes (%d to %d, default: %d)</source>
<translation>Définir la taille du cache de la base de données en mégaoctets (%d à %d, par défaut : %d)</translation>
</message>
<message>
<source>Set maximum BIP141 block weight (default: %d)</source>
<translation>Définir le poids maximal de bloc BIP141 (par défaut : %d)</translation>
</message>
<message>
<source>Set maximum block size in bytes (default: %d)</source>
<translation>Définir la taille maximale de bloc en octets (par défaut : %d)</translation>
</message>
<message>
<source>Specify wallet file (within data directory)</source>
<translation>Spécifiez le fichier de porte-monnaie (dans le répertoire de données)</translation>
</message>
<message>
<source>Starting network threads...</source>
<translation>Démarrage des processus réseau...</translation>
</message>
<message>
<source>The source code is available from %s.</source>
<translation>Le code source est disponible sur %s.</translation>
</message>
<message>
<source>Unsupported argument -benchmark ignored, use -debug=bench.</source>
<translation>Argument non pris en charge -benchmark ignoré, utiliser -debug=bench.</translation>
</message>
<message>
<source>Unsupported argument -debugnet ignored, use -debug=net.</source>
<translation>Argument non pris en charge -debugnet ignoré, utiliser -debug=net.</translation>
</message>
<message>
<source>Unsupported argument -tor found, use -onion.</source>
<translation>Argument non pris en charge -tor trouvé, utiliser -onion.</translation>
</message>
<message>
<source>Use UPnP to map the listening port (default: %u)</source>
<translation>Utiliser l'UPnP pour mapper le port d'écoute (par défaut : %u)</translation>
</message>
<message>
<source>User Agent comment (%s) contains unsafe characters.</source>
<translation>Le commentaire d'agent utilisateur (%s) contient des caractères dangereux.</translation>
</message>
<message>
<source>Verifying blocks...</source>
<translation>Vérification des blocs...</translation>
</message>
<message>
<source>Verifying wallet...</source>
<translation>Vérification du porte-monnaie...</translation>
</message>
<message>
<source>Wallet %s resides outside data directory %s</source>
<translation>Le porte-monnaie %s réside en dehors du répertoire de données %s</translation>
</message>
<message>
<source>Wallet debugging/testing options:</source>
<translation>Options de débogage/de test du porte-monnaie :</translation>
</message>
<message>
<source>Wallet options:</source>
<translation>Options du porte-monnaie :</translation>
</message>
<message>
<source>Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times</source>
<translation>Permettre les connexions JSON-RPC de sources spécifiques. Valide pour <ip> qui sont une IP simple (p. ex. 1.2.3.4), un réseau/masque réseau (p. ex. 1.2.3.4/255.255.255.0) ou un réseau/CIDR (p. ex. 1.2.3.4/24). Cette option peut être spécifiée plusieurs fois</translation>
</message>
<message>
<source>Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6</source>
<translation>Se lier à l'adresse donnée et ajouter à la liste blanche les pairs s'y connectant. Utiliser la notation [host]:port pour l'IPv6</translation>
</message>
<message>
<source>Bind to given address to listen for JSON-RPC connections. Use [host]:port notation for IPv6. This option can be specified multiple times (default: bind to all interfaces)</source>
<translation>Se lier à l'adresse donnée pour écouter des connexions JSON-RPC. Utiliser la notation [host]:port pour l'IPv6. Cette option peut être spécifiée plusieurs fois (par défaut : se lier à toutes les interfaces)</translation>
</message>
<message>
<source>Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)</source>
<translation>Créer de nouveaux fichiers avec les permissions système par défaut, au lieu de umask 077 (effectif seulement avec la fonction du porte-monnaie désactivée)</translation>
</message>
<message>
<source>Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)</source>
<translation>Découvrir ses propres adresses IP (par défaut : 1 en écoute et sans -externalip ou -proxy)</translation>
</message>
<message>
<source>Error: Listening for incoming connections failed (listen returned error %s)</source>
<translation>Erreur : l'écoute des connexions entrantes a échoué (l'écoute a retourné l'erreur %s)</translation>
</message>
<message>
<source>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</source>
<translation>Exécuter une commande lorsqu'une alerte pertinente est reçue, ou si nous voyons une bifurcation vraiment étendue (%s dans la commande est remplacé par le message)</translation>
</message>
<message>
<source>Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)</source>
<translation>Les frais (en %s/Ko) inférieurs à ce seuil sont considérés comme étant nuls pour le relais, le minage et la création de transactions (par défaut : %s)</translation>
</message>
<message>
<source>If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)</source>
<translation>Si paytxfee n'est pas défini, inclure suffisamment de frais afin que les transactions commencent la confirmation en moyenne avant n blocs (par défaut : %u)</translation>
</message>
<message>
<source>Invalid amount for -maxtxfee=<amount>: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)</source>
<translation>Montant invalide pour -maxtxfee=<amount> : « %s » (doit être au moins les frais minrelay de %s pour prévenir le blocage des transactions)</translation>
</message>
<message>
<source>Maximum size of data in data carrier transactions we relay and mine (default: %u)</source>
<translation>Quantité maximale de données dans les transactions du porteur de données que nous relayons et minons (par défaut : %u)</translation>
</message>
<message>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation>Demander les adresses des pairs par recherche DNS si l'on manque d'adresses (par défaut : 1 sauf si -connect)</translation>
</message>
<message>
<source>Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)</source>
<translation>Rendre aléatoires les authentifiants pour chaque connexion mandataire. Cela active l'isolement de flux de Tor (par défaut : %u)</translation>
</message>
<message>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: %d)</source>
<translation>Définir la taille maximale en octets des transactions à priorité élevée et frais modiques (par défaut : %d)</translation>
</message>
<message>
<source>The transaction amount is too small to send after the fee has been deducted</source>
<translation>Le montant de la transaction est trop bas pour être envoyé une fois que les frais ont été déduits</translation>
</message>
<message>
<source>This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit <https://www.openssl.org/> and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.</source>
<translation>Ce produit comprend des logiciels développés par le projet OpenSSL pour être utilisés dans la boîte à outils OpenSSL <https://www.openssl.org/> et un logiciel cryptographique écrit par Eric Young, ainsi qu'un logiciel UPnP écrit par Thomas Bernard.</translation>
</message>
<message>
<source>Use hierarchical deterministic key generation (HD) after BIP32. Only has effect during wallet creation/first start</source>
<translation>Utiliser une génération de clé hiérarchique déterministe (HD) après BIP32. N'a d'effet que lors de la création ou du lancement initial du porte-monnaie</translation>
</message>
<message>
<source>Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway</source>
<translation>Les pairs de la liste blanche ne peuvent pas être bannis DoS et leurs transactions sont toujours relayées, même si elles sont déjà dans le mempool, utile p. ex. pour une passerelle</translation>
</message>
<message>
<source>You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain</source>
<translation>Vous devez reconstruire la base de données en utilisant -reindex afin de revenir au mode sans élagage. Cela retéléchargera complètement la chaîne de blocs.</translation>
</message>
<message>
<source>(default: %u)</source>
<translation>(par défaut : %u)</translation>
</message>
<message>
<source>Accept public REST requests (default: %u)</source>
<translation>Accepter les demandes REST publiques (par défaut : %u)</translation>
</message>
<message>
<source>Automatically create Tor hidden service (default: %d)</source>
<translation>Créer automatiquement un service caché Tor (par défaut : %d)</translation>
</message>
<message>
<source>Connect through SOCKS5 proxy</source>
<translation>Se connecter par un mandataire SOCKS5</translation>
</message>
<message>
<source>Error reading from database, shutting down.</source>
<translation>Erreur de lecture de la base de données, fermeture en cours.</translation>
</message>
<message>
<source>Imports blocks from external blk000??.dat file on startup</source>
<translation>Importe des blocs à partir d'un fichier blk000??.dat externe lors du démarrage</translation>
</message>
<message>
<source>Information</source>
<translation>Informations</translation>
</message>
<message>
<source>Invalid amount for -paytxfee=<amount>: '%s' (must be at least %s)</source>
<translation>Montant invalide pour -paytxfee=<amount> : « %s » (doit être au moins %s)</translation>
</message>
<message>
<source>Invalid netmask specified in -whitelist: '%s'</source>
<translation>Masque réseau invalide spécifié dans -whitelist : « %s »</translation>
</message>
<message>
<source>Keep at most <n> unconnectable transactions in memory (default: %u)</source>
<translation>Garder au plus <n> transactions non connectables en mémoire (par défaut : %u)</translation>
</message>
<message>
<source>Need to specify a port with -whitebind: '%s'</source>
<translation>Un port doit être spécifié avec -whitebind : « %s »</translation>
</message>
<message>
<source>Node relay options:</source>
<translation>Options de relais du nœud :</translation>
</message>
<message>
<source>RPC server options:</source>
<translation>Options du serveur RPC :</translation>
</message>
<message>
<source>Reducing -maxconnections from %d to %d, because of system limitations.</source>
<translation>Réduction de -maxconnections de %d à %d, en raison des limitations du système.</translation>
</message>
<message>
<source>Rescan the block chain for missing wallet transactions on startup</source>
<translation>Réanalyser la chaîne de blocs au démarrage, à la recherche de transactions de porte-monnaie manquantes</translation>
</message>
<message>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Envoyer les infos de débogage/trace à la console au lieu du fichier debug.log</translation>
</message>
<message>
<source>Send transactions as zero-fee transactions if possible (default: %u)</source>
<translation>Envoyer si possible les transactions comme étant sans frais (par défaut : %u)</translation>
</message>
<message>
<source>Show all debugging options (usage: --help -help-debug)</source>
<translation>Montrer toutes les options de débogage (utilisation : --help -help-debug)</translation>
</message>
<message>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Réduire le fichier debug.log lors du démarrage du client (par défaut : 1 sans -debug)</translation>
</message>
<message>
<source>Signing transaction failed</source>
<translation>Échec de signature de la transaction</translation>
</message>
<message>
<source>The transaction amount is too small to pay the fee</source>
<translation>Le montant de la transaction est trop bas pour que les frais soient payés</translation>
</message>
<message>
<source>This is experimental software.</source>
<translation>Ceci est un logiciel expérimental.</translation>
</message>
<message>
<source>Tor control port password (default: empty)</source>
<translation>Mot de passe du port de contrôle Tor (par défaut : vide)</translation>
</message>
<message>
<source>Tor control port to use if onion listening enabled (default: %s)</source>
<translation>Port de contrôle Tor à utiliser si l'écoute onion est activée (par défaut : %s)</translation>
</message>
<message>
<source>Transaction amount too small</source>
<translation>Le montant de la transaction est trop bas</translation>
</message>
<message>
<source>Transaction amounts must be positive</source>
<translation>Les montants de transaction doivent être positifs</translation>
</message>
<message>
<source>Transaction too large for fee policy</source>
<translation>La transaction est trop grosse pour la politique de frais</translation>
</message>
<message>
<source>Transaction too large</source>
<translation>La transaction est trop grosse</translation>
</message>
<message>
<source>Unable to bind to %s on this computer (bind returned error %s)</source>
<translation>Impossible de se lier à %s sur cet ordinateur (bind a retourné l'erreur %s)</translation>
</message>
<message>
<source>Upgrade wallet to latest format on startup</source>
<translation>Mettre à niveau le porte-monnaie au démarrage vers le format le plus récent</translation>
</message>
<message>
<source>Username for JSON-RPC connections</source>
<translation>Nom d'utilisateur pour les connexions JSON-RPC</translation>
</message>
<message>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<source>Warning: unknown new rules activated (versionbit %i)</source>
<translation>Avertissement : nouvelles règles inconnues activées (bit de version %i)</translation>
</message>
<message>
<source>Whether to operate in a blocks only mode (default: %u)</source>
<translation>Faut-il fonctionner en mode blocs seulement (par défaut : %u)</translation>
</message>
<message>
<source>Zapping all transactions from wallet...</source>
<translation>Suppression de toutes les transactions du porte-monnaie...</translation>
</message>
<message>
<source>ZeroMQ notification options:</source>
<translation>Options de notification ZeroMQ :</translation>
</message>
<message>
<source>Password for JSON-RPC connections</source>
<translation>Mot de passe pour les connexions JSON-RPC</translation>
</message>
<message>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Exécuter la commande lorsque le meilleur bloc change (%s dans cmd est remplacé par le hachage du bloc)</translation>
</message>
<message>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Autoriser les recherches DNS pour -addnode, -seednode et -connect</translation>
</message>
<message>
<source>Loading addresses...</source>
<translation>Chargement des adresses…</translation>
</message>
<message>
<source>(1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)</source>
<translation>(1 = conserver les métadonnées de transaction, p. ex. les informations du propriétaire du compte et de demande de paiement, 2 = abandonner les métadonnées de transaction)</translation>
</message>
<message>
<source>-maxtxfee is set very high! Fees this large could be paid on a single transaction.</source>
<translation>La valeur -maxtxfee est très élevée ! Des frais aussi élevés pourraient être payés en une seule transaction.</translation>
</message>
<message>
<source>-paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>La valeur -paytxfee est très élevée ! Il s'agit des frais de transaction que vous payerez si vous envoyez une transaction.</translation>
</message>
<message>
<source>Do not keep transactions in the mempool longer than <n> hours (default: %u)</source>
<translation>Ne pas conserver de transactions dans la réserve de mémoire plus de <n> heures (par défaut : %u)</translation>
</message>
<message>
<source>Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)</source>
<translation>Les frais (en %s/Ko) inférieurs à ce seuil sont considérés comme étant nuls pour la création de transactions (par défaut : %s)</translation>
</message>
<message>
<source>How thorough the block verification of -checkblocks is (0-4, default: %u)</source>
<translation>Degré de profondeur de la vérification des blocs -checkblocks (0-4, par défaut : %u)</translation>
</message>
<message>
<source>Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)</source>
<translation>Maintenir un index complet des transactions, utilisé par l'appel RPC getrawtransaction (obtenir la transaction brute) (par défaut : %u)</translation>
</message>
<message>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: %u)</source>
<translation>Délai en secondes de refus de reconnexion pour les pairs présentant un mauvais comportement (par défaut : %u)</translation>
</message>
<message>
<source>Output debugging information (default: %u, supplying <category> is optional)</source>
<translation>Extraire les informations de débogage (par défaut : %u, fournir <category> est facultatif)</translation>
</message>
<message>
<source>Support filtering of blocks and transaction with bloom filters (default: %u)</source>
<translation>Prendre en charge le filtrage des blocs et des transactions avec les filtres bloom (par défaut : %u)</translation>
</message>
<message>
<source>Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments.</source>
<translation>La taille totale de la chaîne de version de réseau (%i) dépasse la longueur maximale (%i). Réduire le nombre ou la taille des commentaires uacomments.</translation>
</message>
<message>
<source>Tries to keep outbound traffic under the given target (in MiB per 24h), 0 = no limit (default: %d)</source>
<translation>Tente de garder le trafic sortant sous la cible donnée (en Mio par 24 h), 0 = sans limite (par défaut : %d)</translation>
</message>
<message>
<source>Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source>
<translation>L'argument non pris en charge -socks a été trouvé. Il n'est plus possible de définir la version de SOCKS, seuls les mandataires SOCKS5 sont pris en charge.</translation>
</message>
<message>
<source>Unsupported argument -whitelistalwaysrelay ignored, use -whitelistrelay and/or -whitelistforcerelay.</source>
<translation>Argument non pris en charge -whitelistalwaysrelay ignoré, utiliser -whitelistrelay et/ou -whitelistforcerelay.</translation>
</message>
<message>
<source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: %s)</source>
<translation>Utiliser un serveur mandataire SOCKS5 séparé pour atteindre les pairs par les services cachés de Tor (par défaut : %s)</translation>
</message>
<message>
<source>Username and hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcuser. This option can be specified multiple times</source>
<translation>Nom d'utilisateur et mot de passe haché pour les connexions JSON-RPC. Le champ <userpw> est au format : <USERNAME>:<SALT>$<HASH>. Un script python canonique est inclus dans share/rpcuser. Cette option peut être spécifiée plusieurs fois.</translation>
</message>
<message>
<source>Warning: Unknown block versions being mined! It's possible unknown rules are in effect</source>
<translation>Avertissement : des versions de blocs inconnues sont minées ! Il est possible que des règles inconnues soient en vigueur</translation>
</message>
<message>
<source>Warning: Wallet file corrupt, data salvaged! Original %s saved as %s in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Avertissement : le fichier du porte-monnaie est corrompu, les données ont été récupérées ! Le fichier %s original a été enregistré en tant que %s dans %s ; si votre solde ou vos transactions sont incorrects, vous devriez restaurer une sauvegarde.</translation>
</message>
<message>
<source>(default: %s)</source>
<translation>(par défaut : %s)</translation>
</message>
<message>
<source>Always query for peer addresses via DNS lookup (default: %u)</source>
<translation>Toujours demander les adresses des pairs par recherche DNS (par défaut : %u)</translation>
</message>
<message>
<source>How many blocks to check at startup (default: %u, 0 = all)</source>
<translation>Nombre de blocs à vérifier au démarrage (par défaut : %u, 0 = tous)</translation>
</message>
<message>
<source>Include IP addresses in debug output (default: %u)</source>
<translation>Inclure les adresses IP à la sortie de débogage (par défaut : %u)</translation>
</message>
<message>
<source>Invalid -proxy address: '%s'</source>
<translation>Adresse -proxy invalide : « %s »</translation>
</message>
<message>
<source>Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)</source>
<translation>Écouter les connexions JSON-RPC sur <port> (par défaut : %u ou testnet : %u)</translation>
</message>
<message>
<source>Listen for connections on <port> (default: %u or testnet: %u)</source>
<translation>Écouter les connexions sur <port> (par défaut : %u ou testnet : %u)</translation>
</message>
<message>
<source>Maintain at most <n> connections to peers (default: %u)</source>
<translation>Garder au plus <n> connexions avec les pairs (par défaut : %u)</translation>
</message>
<message>
<source>Make the wallet broadcast transactions</source>
<translation>Obliger le porte-monnaie à diffuser les transactions</translation>
</message>
<message>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)</source>
<translation>Tampon maximal de réception par connexion, <n>*1000 octets (par défaut : %u)</translation>
</message>
<message>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: %u)</source>
<translation>Tampon maximal d'envoi par connexion, <n>*1000 octets (par défaut : %u)</translation>
</message>
<message>
<source>Prepend debug output with timestamp (default: %u)</source>
<translation>Ajouter l'horodatage au début de la sortie de débogage (par défaut : %u)</translation>
</message>
<message>
<source>Relay and mine data carrier transactions (default: %u)</source>
<translation>Relayer et miner les transactions du porteur de données (par défaut : %u)</translation>
</message>
<message>
<source>Relay non-P2SH multisig (default: %u)</source>
<translation>Relayer les multisignatures non-P2SH (par défaut : %u)</translation>
</message>
<message>
<source>Set key pool size to <n> (default: %u)</source>
<translation>Définir la taille de la réserve de clés à <n> (par défaut : %u)</translation>
</message>
<message>
<source>Set the number of threads to service RPC calls (default: %d)</source>
<translation>Définir le nombre de fils pour les appels RPC (par défaut : %d)</translation>
</message>
<message>
<source>Specify configuration file (default: %s)</source>
<translation>Spécifier le fichier de configuration (par défaut : %s)</translation>
</message>
<message>
<source>Specify connection timeout in milliseconds (minimum: 1, default: %d)</source>
<translation>Spécifier le délai d'expiration de la connexion en millisecondes (minimum : 1, par défaut : %d)</translation>
</message>
<message>
<source>Specify pid file (default: %s)</source>
<translation>Spécifier le fichier pid (par défaut : %s)</translation>
</message>
<message>
<source>Spend unconfirmed change when sending transactions (default: %u)</source>
<translation>Dépenser la monnaie non confirmée lors de l'envoi de transactions (par défaut : %u)</translation>
</message>
<message>
<source>Threshold for disconnecting misbehaving peers (default: %u)</source>
<translation>Seuil de déconnexion des pairs présentant un mauvais comportement (par défaut : %u)</translation>
</message>
<message>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Réseau inconnu spécifié dans -onlynet : « %s »</translation>
</message>
<message>
<source>Insufficient funds</source>
<translation>Fonds insuffisants</translation>
</message>
<message>
<source>Loading block index...</source>
<translation>Chargement de l’index des blocs…</translation>
</message>
<message>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Ajouter un nœud auquel se connecter et tenter de garder la connexion ouverte</translation>
</message>
<message>
<source>Loading wallet...</source>
<translation>Chargement du porte-monnaie…</translation>
</message>
<message>
<source>Cannot downgrade wallet</source>
<translation>Impossible de revenir à une version inférieure du porte-monnaie</translation>
</message>
<message>
<source>Cannot write default address</source>
<translation>Impossible d'écrire l'adresse par défaut</translation>
</message>
<message>
<source>Rescanning...</source>
<translation>Nouvelle analyse…</translation>
</message>
<message>
<source>Done loading</source>
<translation>Chargement terminé</translation>
</message>
<message>
<source>Error</source>
<translation>Erreur</translation>
</message>
</context>
</TS><|fim▁end|>
|
<source>Open until %1</source>
|
<|file_name|>gui.py<|end_file_name|><|fim▁begin|>import wx
import wx.calendar
from wx.lib.masked import TimeCtrl
from wx.lib.agw import hypertreelist as HTL
from datetime import datetime, time
from lib import Task, DATA, PRIORITIES, DEFAULT_PRIORITY
from decorators import requires_selection
ID_ADD_TASK = 1000
ID_ADD_SUBTASK = 1010
ID_COLLAPSE = 1020
ID_EXPAND = 1030
HIDE_COMPLETE = False
class TaskList(HTL.HyperTreeList):
"""
This is the widget that houses the tasks
"""
def __init__(self, parent):
self.parent = parent
style = wx.SUNKEN_BORDER | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.TR_HIDE_ROOT | wx.TR_FULL_ROW_HIGHLIGHT | wx.TR_ROW_LINES | wx.TR_EDIT_LABELS #| wx.TR_COLUMN_LINES | HTL.TR_AUTO_CHECK_PARENT
HTL.HyperTreeList.__init__(self, parent, -1, style=style)
self.AddColumn('%')
self.AddColumn('!')
self.AddColumn('Task')
self.AddColumn('Due')
self.SetMainColumn(2)
self.root = self.AddRoot('Tasks')
self.GetMainWindow().Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnEndEdit)
self.Bind(HTL.EVT_TREE_ITEM_CHECKED, self.OnItemToggled)
def EvaluateCompleteness(self, item=None):
"""Determines how complete various task trees are"""
pass
def OnEndEdit(self, evt):
print 'Save task?', evt.GetLabel(), evt.GetItem()
task = evt.GetItem().GetData()
if task:
task.summary = evt.GetLabel()
def OnLeftDClick(self, evt):
pt = evt.GetPosition()
item, flags, column = self.HitTest(pt)
if item and (flags & wx.TREE_HITTEST_ONITEMLABEL):
#self.EditLabel(item)
self.parent.EditTask(item)
evt.Skip()
def OnItemToggled(self, evt):
item = evt.GetItem()
task = item.GetData()
if task:
task.is_complete = item.IsChecked()
if HIDE_COMPLETE:
item.Hide(task.is_complete)
self.EvaluateCompleteness()
def SetTasks(self, tasks):
for task in tasks:
self.AddTask(task, refresh=False)
self.Refresh()
self.ExpandAll()
def AddTask(self, task, parent=None, refresh=True):
if parent is None:
parent = self.root
task.parent = parent
item = self.AppendItem(parent, task.summary, ct_type=1)
item.SetData(task)
for child in task.children:
self.AddTask(child, item, refresh=refresh)
if refresh:
self.Refresh()
def Refresh(self, erase=True, rect=None, parent=None):
"""Refreshes the tree when a task has changed"""
if parent is None:
parent = self.root
for child in parent.GetChildren():
task = child.GetData()
if task:
self.SetItemText(child, '0%', 0)
self.SetItemText(child, str(task._priority), 1)
self.SetItemText(child, task.summary, 2)
child.Check(task.is_complete)
if HIDE_COMPLETE:
child.Hide(task.is_complete)
if task.due_date:
self.SetItemText(child, task.due_date.strftime('%H:%M %m/%d/%y'), 3)
else:
self.SetItemText(child, '', 3)
self.Refresh(parent=child)
super(TaskList, self).Refresh()
class TaskInfoDialog(wx.Dialog):
def __init__(self, *args, **kwds):
self.task = kwds.pop('task', None)
kwds['style'] = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.THICK_FRAME
wx.Dialog.__init__(self, *args, **kwds)
self.panel = wx.Panel(self, -1)
self.txtSummary = wx.TextCtrl(self.panel, -1, "")
self.lblNotes = wx.StaticText(self.panel, -1, _('Notes:'), style=wx.ALIGN_RIGHT)
self.txtNotes = wx.TextCtrl(self.panel, -1, "", style=wx.TE_MULTILINE|wx.TE_RICH|wx.TE_WORDWRAP)
self.lblPriority = wx.StaticText(self.panel, -1, _('Priority:'), style=wx.ALIGN_RIGHT)
choices = [p[1] for p in sorted(PRIORITIES.items(), key=lambda p: p[0])]
self.cmbPriority = wx.ComboBox(self.panel, -1, choices=choices, style=wx.CB_DROPDOWN)
self.chkIsComplete = wx.CheckBox(self.panel, -1, _('Is Complete'))
self.lblDateDue = wx.StaticText(self.panel, -1, _('Due:'), style=wx.ALIGN_RIGHT)
self.chkIsDue = wx.CheckBox(self.panel, -1, _('Has due date'))
self.calDueDate = wx.calendar.CalendarCtrl(self.panel, -1)
self.txtTime = TimeCtrl(self.panel, id=-1,
value=datetime.now().strftime('%X'),
style=wx.TE_PROCESS_TAB,
validator=wx.DefaultValidator,
format='24HHMMSS',
fmt24hr=True,
displaySeconds=True,
)
self.__set_properties()
self.__do_layout()
self.chkIsDue.Bind(wx.EVT_CHECKBOX, self.ToggleDueDate)
self.txtSummary.SetFocus()
if self.task is not None:
self.SetTask(self.task)
def __set_properties(self):
self.SetTitle(_('Task Information'))
self.cmbPriority.SetValue(PRIORITIES[DEFAULT_PRIORITY])
self.calDueDate.Enable(False)
self.txtTime.Enable(False)
def __do_layout(self):
mainSizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.FlexGridSizer(5, 2, 5, 5)
lblSubject = wx.StaticText(self.panel, -1, _('Summary:'))
sizer.Add(lblSubject, 0, wx.EXPAND, 0)
sizer.Add(self.txtSummary, 0, wx.ALL|wx.EXPAND, 0)
sizer.Add(self.lblNotes, 0, wx.EXPAND, 0)
sizer.Add(self.txtNotes, 0, wx.EXPAND, 0)
sizer.Add(self.lblPriority, 0, wx.EXPAND, 0)
sizer.Add(self.cmbPriority, 0, wx.EXPAND, 0)
sizer.Add((20, 20), 0, 0, 0)
sizer.Add(self.chkIsComplete, 0, 0, 0)
sizer.Add(self.lblDateDue, 0, wx.ALIGN_RIGHT, 0)
sizer.Add(self.chkIsDue, 0, 0, 0)
sizer.Add((20, 20), 0, 0, 0)
sizer.Add(self.calDueDate, 0, 0, 0)
sizer.Add((20, 20), 0, 0, 0)
sizer.Add(self.txtTime, 0, 0, 0)
self.panel.SetSizer(sizer)
sizer.AddGrowableRow(1)
sizer.AddGrowableCol(1)
mainSizer.Add(self.panel, 1, wx.ALL|wx.EXPAND, 5)
mainSizer.AddF(self.CreateStdDialogButtonSizer(wx.OK|wx.CANCEL),
wx.SizerFlags(0).Expand().Border(wx.BOTTOM|wx.RIGHT, 5))
self.SetSizer(mainSizer)
mainSizer.Fit(self)
self.Layout()
self.Centre()
size = (290, 450)
self.SetMinSize(size)
self.SetSize(size)
def ToggleDueDate(self, evt):
en = self.chkIsDue.IsChecked()
self.calDueDate.Enable(en)
self.txtTime.Enable(en)
def GetTask(self):
if self.task is None:
self.task = Task()
if self.chkIsDue.IsChecked():
due = self.calDueDate.PyGetDate()
tm = self.txtTime.GetValue()
try:
tm = datetime.strptime(tm, '%H:%M:%S').time()
except ValueError:
tm = datetime.strptime(tm, '%H:%M').time()
due = datetime.combine(due, tm)
else:
due = None
self.task.summary = self.txtSummary.GetValue()
self.task.is_complete = self.chkIsComplete.IsChecked()
self.task.due_date = due
self.task.priority = self.cmbPriority.GetValue()
self.task.notes = self.txtNotes.GetValue()
return self.task
def SetTask(self, task):
self.txtSummary.SetValue(task.summary)
self.txtNotes.SetValue(task.notes)
self.cmbPriority.SetStringSelection(task.priority)
self.chkIsComplete.SetValue(task.is_complete)<|fim▁hole|> self.calDueDate.PySetDate(task.due_date)
self.txtTime.SetValue(task.due_date.strftime('%X'))
self.task = task
class TreeDoFrame(wx.Frame):
"""
This is the main TreeDo window, where your tasks are laid out before you.
"""
def __init__(self):
wx.Frame.__init__(self, None, -1, title=_('TreeDo'), size=(350, 500))
self.SetMinSize((300, 300))
self.CenterOnParent()
self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT)
self.toolbar.SetToolBitmapSize((24, 24))
save_img = wx.Bitmap('res/save.png', wx.BITMAP_TYPE_PNG)
add_img = wx.Bitmap('res/add.png', wx.BITMAP_TYPE_PNG)
add_sub_img = wx.Bitmap('res/add_subtask.png', wx.BITMAP_TYPE_PNG)
collapse_img = wx.Bitmap('res/collapse.png', wx.BITMAP_TYPE_PNG)
expand_img = wx.Bitmap('res/expand.png', wx.BITMAP_TYPE_PNG)
delete_img = wx.Bitmap('res/delete.png', wx.BITMAP_TYPE_PNG)
self.toolbar.AddSimpleTool(wx.ID_SAVE, save_img, _('Save Task List'), _('Save the task list to the hard drive'))
self.toolbar.AddSimpleTool(ID_ADD_TASK, add_img, _('Add Task'), _('Create a new task'))
self.toolbar.AddSimpleTool(ID_ADD_SUBTASK, add_sub_img, _('Add Sub-Task'), _('Create a new subtask'))
#self.toolbar.AddSimpleTool(ID_COLLAPSE, collapse_img, _('Collapse'), _('Collapse all tasks'))
self.toolbar.AddSimpleTool(ID_EXPAND, expand_img, _('Expand'), _('Expand all tasks'))
self.toolbar.AddSimpleTool(wx.ID_DELETE, delete_img, _('Delete'), _('Delete this task'))
self.Bind(wx.EVT_TOOL, self.OnToolClick)
self.toolbar.Realize()
sizer = wx.BoxSizer(wx.VERTICAL)
self.tree = TaskList(self)
sizer.Add(self.tree, 1, wx.EXPAND)
self.Bind(wx.EVT_SIZE, self.UpdateColumnWidths)
self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.ToggleToolbarButtons)
self.tree.SetTasks(DATA.get_list())
self.ToggleToolbarButtons()
def UpdateColumnWidths(self, evt=None):
width, height = self.GetSize()
self.tree.SetColumnWidth(0, 40)
self.tree.SetColumnWidth(1, 20)
self.tree.SetColumnWidth(2, width - 180)
self.tree.SetColumnWidth(3, 100)
evt.Skip()
def ToggleToolbarButtons(self, evt=None):
"""Enable or disable certain toolbar buttons based on the selection"""
enable_sub_btns = (self.tree.GetSelection() != self.tree.root)
self.toolbar.EnableTool(ID_ADD_SUBTASK, enable_sub_btns)
self.toolbar.EnableTool(wx.ID_DELETE, enable_sub_btns)
if evt:
evt.Skip()
def AddTask(self, parent=None):
"""Allows the user to add a new task"""
taskDlg = TaskInfoDialog(self, -1, _('Task Info'))
if taskDlg.ShowModal() == wx.ID_OK:
task = taskDlg.GetTask()
self.tree.AddTask(task, parent)
@requires_selection
def AddSubTask(self):
"""Allows the user to add a new task to the selected task"""
parent = self.tree.GetSelection()
return self.AddTask(parent)
@requires_selection
def EditSelectedTask(self):
"""Allows the user to edit the selected task"""
item = self.tree.GetSelection()
self.EditTask(item)
def EditTask(self, item):
"""Allows the user to edit a task's information"""
task = item.GetData()
taskDlg = TaskInfoDialog(self, -1, _('Task Info'), task=task)
if taskDlg.ShowModal() == wx.ID_OK:
task = taskDlg.GetTask()
item.SetData(task)
self.tree.Refresh()
@requires_selection
def DeleteSelectedTask(self):
"""Allows the user to delete the selected task"""
item = self.tree.GetSelection()
self.DeleteTask(item)
def DeleteTask(self, item):
"""Allows the user to delete a task"""
if item.HasChildren():
print 'Deleting item with children'
self.tree.DeleteChildren(item)
self.tree.Delete(item)
def OnToolClick(self, evt):
eid = evt.GetId()
if eid == ID_ADD_TASK:
self.AddTask()
elif eid == ID_ADD_SUBTASK:
self.AddSubTask()
elif eid == ID_COLLAPSE:
for item in self.tree.GetChildren():
item.Collapse(self.tree)
elif eid == ID_EXPAND:
self.tree.ExpandAll()
elif eid == wx.ID_SAVE:
self.Persist()
elif eid == wx.ID_DELETE:
self.DeleteSelectedTask()
def Persist(self):
"""Persists the task list to the filesystem"""
DATA.persist(self.tree.root)<|fim▁end|>
|
if task.due_date is not None:
self.chkIsDue.SetValue(True)
|
<|file_name|>entry.js<|end_file_name|><|fim▁begin|>// React app
import React from 'react'
import {render} from 'react-dom'
import App from './components/base_layout/App.jsx'
// Redux state manager
import { Provider } from 'react-redux'
import { createStore } from 'redux'
import reducers from './state_manager/reducers'
// Electron IPC communication events
import ipcRendererEvents from './ipc_layer/ipcRendererEvents'
//////////////////////////
/// React Application ////
//////////////////////////<|fim▁hole|> <App />
</Provider>,
document.getElementById('app')
)
//////////////////////////////
/// IPC with main process ////
//////////////////////////////
ipcRendererEvents(store)
///////////////////
/// Workarounds ///
///////////////////
/* The chunk below will be executed after the react app is rendered */
import {resizer} from './components/base_layout/layout.css'
let nav = document.querySelector('nav')
let node = document.querySelector('.'+resizer)
let startX, startWidth
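// Manual drag-to-resize for the nav sidebar: initDrag captures the starting
// mouse position and nav width, doDrag clamps the new width to the 200-400px
// range, and stopDrag detaches the temporary listeners on mouse release.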
const initDrag = e => {
startX = e.clientX
startWidth = parseInt(window.getComputedStyle(nav).width)
window.addEventListener('mousemove', doDrag, false)
window.addEventListener('mouseup', stopDrag, false)
}
const doDrag = e => {
const newWidth = (startWidth + e.clientX - startX)
nav.style.width = (newWidth < 200 ? 200 : (newWidth > 400 ? 400 : newWidth)) + 'px'
}
const stopDrag = e => {
window.removeEventListener('mousemove', doDrag, false)
window.removeEventListener('mouseup', stopDrag, false)
}
node.addEventListener('mousedown', initDrag, false)<|fim▁end|>
|
export let store = createStore(reducers)
render(
<Provider store={store}>
|
<|file_name|>store.py<|end_file_name|><|fim▁begin|>from rknfilter.targets import BaseTarget
from rknfilter.db import Resource, Decision, CommitEvery
from rknfilter.core import DumpFilesParser
class StoreTarget(BaseTarget):
def __init__(self, *args, **kwargs):<|fim▁hole|> def process(self):
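# CommitEvery presumably batches commits on the session: each plain commit()
# call lets it decide whether to actually flush, and commit(force=True)
# persists whatever remains once the loop is done.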
commit = CommitEvery(self._session)
for content, decision, domains, urls, ips, _ in self._dump_files_parser.get_data():
# TODO: move to models?
resource = Resource.get_or_create(self._session, rkn_id=content['rkn_id'])
if resource.id is None:
resource.include_date = content['include_date']
resource.entry_type = content['entry_type']
resource.urgency_type = content['urgency_type']
resource.block_type = content['block_type']
resource.decision = Decision(
date=decision['decision_date'],
org=decision['decision_org'],
num=decision['decision_num']
)
resource.sync_m2m_proxy('domains_list', domains)
resource.sync_m2m_proxy('urls_list', urls)
resource.sync_m2m_proxy('ips_list', ips)
commit()
commit(force=True)<|fim▁end|>
|
super(StoreTarget, self).__init__(*args, **kwargs)
self._dump_files_parser = DumpFilesParser()
|
<|file_name|>ip_vtk58.py<|end_file_name|><|fim▁begin|># Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import re
import shutil
import sys
import utils
BASENAME = "VTK"
GIT_REPO = "http://vtk.org/VTK.git"
GIT_TAG = "v5.8.0"
VTK_BASE_VERSION = "vtk-5.8"
# this patch does three things:
# 1. adds try/catch blocks to all python method calls in order
# to trap bad_alloc exceptions
# 2. implements my scheme for turning all VTK errors into Python exceptions
# by making use of a special output window class
# 3. gives up the GIL around all VTK calls. This is also necessary
# for 2 not to deadlock on multi-cores.
EXC_PATCH = "pyvtk580_tryexcept_and_pyexceptions.diff"
# fixes attributes in vtkproperty for shader use in python
VTKPRPRTY_PATCH = "vtkProperty_PyShaderVar.diff"
# recent segfault with vtk 5.6.1 and wxPython 2.8.11.0
# see here for more info:
# http://vtk.1045678.n5.nabble.com/wx-python-scripts-segfault-td1234471.html
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH = "wxvtkrwi_displayid_segfault.diff"
dependencies = ['CMake']
class VTK58(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s-build' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
self.exc_patch_src = os.path.join(config.patches_dir, EXC_PATCH)
self.exc_patch_dst = os.path.join(config.archive_dir, EXC_PATCH)
self.vtkprprty_patch_filename = os.path.join(config.patches_dir,
VTKPRPRTY_PATCH)
self.wxvtkrwi_displayid_segfault_patch_filename = os.path.join(
config.patches_dir,
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH)
config.VTK_LIB = os.path.join(self.inst_dir, 'lib')
# whatever the case may be, we have to register VTK variables
if os.name == 'nt':<|fim▁hole|> # inst/VTK/lib/site-packages the VTK python package
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'site-packages')
else:
# on *ix, inst/VTK/lib contains DLLs
config.VTK_SODIR = os.path.join(
config.VTK_LIB, VTK_BASE_VERSION)
# on *ix, inst/lib/python2.5/site-packages contains the
# VTK python package
# sys.version_info is (2, 5, 0, 'final', 0)
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'python%d.%d/site-packages' % \
sys.version_info[0:2])
# this contains the VTK cmake config (same on *ix and Win)
config.VTK_DIR = os.path.join(config.VTK_LIB, VTK_BASE_VERSION)
def get(self):
if os.path.exists(self.source_dir):
utils.output("VTK already checked out, skipping step.")
else:
utils.goto_archive()
ret = os.system("git clone %s %s" % (GIT_REPO, BASENAME))
if ret != 0:
utils.error("Could not clone VTK repo. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("git checkout %s" % (GIT_TAG,))
if ret != 0:
utils.error("Could not checkout VTK %s. Fix and try again." % (GIT_TAG,))
if not os.path.exists(self.exc_patch_dst):
utils.output("Applying EXC patch")
# we do this copy so we can see if the patch has been done yet or not
shutil.copyfile(self.exc_patch_src, self.exc_patch_dst)
os.chdir(self.source_dir)
# default git-generated patch, so needs -p1
ret = os.system(
"%s -p1 < %s" % (config.PATCH, self.exc_patch_dst))
if ret != 0:
utils.error(
"Could not apply EXC patch. Fix and try again.")
# # VTKPRPRTY PATCH
# utils.output("Applying VTKPRPRTY patch")
# os.chdir(os.path.join(self.source_dir, 'Rendering'))
# ret = os.system(
# "%s -p0 < %s" % (config.PATCH, self.vtkprprty_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply VTKPRPRTY patch. Fix and try again.")
# # WXVTKRWI_DISPLAYID_SEGFAULT patch
# utils.output("Applying VTKWXRWI_DISPLAYID_SEGFAULT patch")
# os.chdir(self.source_dir)
# # default git-generated patch, so needs -p1
# ret = os.system(
# "%s -p1 < %s" % (config.PATCH,
# self.wxvtkrwi_displayid_segfault_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply WXVTKRWI_DISPLAYID_SEGFAULT patch. Fix and try again.")
def unpack(self):
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("VTK build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
cmake_params = "-DBUILD_SHARED_LIBS=ON " \
"-DBUILD_TESTING=OFF " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DVTK_USE_TK=NO " \
"-DVTK_USE_METAIO=ON " \
"-DVTK_USE_PARALLEL=ON " \
"-DPYTHON_EXECUTABLE=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DPYTHON_INCLUDE_PATH=%s " \
"-DVTK_WRAP_PYTHON=ON " % (self.inst_dir,
config.PYTHON_EXECUTABLE,
config.PYTHON_LIBRARY,
config.PYTHON_INCLUDE_PATH)
ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error("Could not configure VTK. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir,
'bin/libvtkWidgetsPython.so')
nt_file = os.path.join(self.build_dir, 'bin', config.BUILD_TARGET,
'vtkWidgetsPythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('VTK.sln')
if ret != 0:
utils.error("Error building VTK. Fix and try again.")
def install(self):
posix_file = os.path.join(self.inst_dir, 'bin/vtkpython')
nt_file = os.path.join(self.inst_dir, 'bin', 'vtkpython.exe')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already installed. Skipping build step.")
else:
# python 2.5.2 setup.py complains that this does not exist
# with VTK PV-3-2-1. This is only on installations with
# EasyInstall / Python Eggs, then the VTK setup.py uses
# EasyInstall and not standard distutils. gah!
# just tested with VTK 5.8.0 and Python 2.7.2
# it indeed installs VTK_PYTHON/VTK-5.8.0-py2.7.egg
# but due to the site.py and easy-install.pth magic in there,
# adding VTK_PYTHON to the PYTHONPATH still works. We can keep
# pip, yay!
if not os.path.exists(config.VTK_PYTHON):
os.makedirs(config.VTK_PYTHON)
os.chdir(self.build_dir)
# we save, set and restore the PP env variable, else
# stupid setuptools complains
save_env = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = config.VTK_PYTHON
ret = utils.make_command('VTK.sln', install=True)
os.environ['PYTHONPATH'] = save_env
if ret != 0:
utils.error("Could not install VTK. Fix and try again.")
# now do some surgery on VTKConfig.cmake and
# VTKLibraryDepends.cmake so builds of VTK-dependent libraries
# with only the DRE to link with Just Work(tm)
# on windows, we need to replace backslash with forward slash
# as that's the style used by the config files. On *ix mostly
# harmless
idp = re.sub(r'\\','/', config.inst_dir)
for fn in [os.path.join(config.VTK_DIR, 'VTKConfig.cmake'),
os.path.join(config.VTK_DIR, 'VTKLibraryDepends.cmake'),
os.path.join(config.VTK_DIR, 'VTKTargets-relwithdebinfo.cmake')]:
if os.path.exists(fn):
utils.re_sub_filter_file(
[(idp, '${VTK_INSTALL_PREFIX}/..')],
fn)
def clean_build(self):
utils.output("Removing build and installation directories.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
def clean_install(self):
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
import vtk
return vtk.vtkVersion.GetVTKVersion()<|fim▁end|>
|
# on Win, inst/VTK/bin contains the so files
config.VTK_SODIR = os.path.join(self.inst_dir, 'bin')
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Parsing module for Rurtle programs.
//!
//! Note that parsing requires some additional information, i.e. the number of
//! arguments for a function. Function calls in Rurtle need neither parenthesis
//! nor something else, so this is legal:
//!
//! ```text
//! FUNCA FUNCB 10
//! ```
//!
//! Depending on how many arguments each function takes, this may be parsed as
//! either `funca(funcb(10))` or `funca(funcb(), 10)`.
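//!
//! For instance, with a (hypothetical) arity table in which `FUNCA` takes two
//! arguments and `FUNCB` takes one, the stream
//!
//! ```text
//! FUNCA FUNCB 10 20
//! ```
//!
//! parses as `funca(funcb(10), 20)`.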
//!
//! # Grammar
//!
//! An EBNF-like (incomplete) grammar may look like
//!
//! ```text
//! root := {statement} ;
//! statement := learn-def | if-stmt | repeat-stmt | while-stmt | return-stmt |
//! try-stmt | expression ;
//! learn-def := 'LEARN' identifier {variable} 'DO' {statement} 'END' ;
//! if-stmt := 'IF' expression 'DO' {statement} ['ELSE' {statement}]'END' ;
//! repeat-stmt := 'REPEAT' expression 'DO' {statement} 'END' ;
//! while-stmt := 'WHILE' expression 'DO' {statement} 'END' ;
//! return-stmt := 'RETURN' expression ;
//! try-stmt := 'TRY' {statement} 'ELSE' {statement} 'END' ;
//! variable := ':' identifier ;
//! identifier := identifier-start {identifier-cont} ;
//! identifier-start := <any alphabetic character> ;
//! identifier-cont := <any alphabetic or numeric character> ;
//! expression := comparison ;
//! comparison := expr [comp_op expr] ;
//! comp_op := '=' | '<' | '>' | '<=' | '>=' | '<>' ;
//! expr := product {('+' | '-') product} ;
//! product := factor {('*' | '/') factor} ;
//! factor := '(' expression ')' | list | variable | string | number | (identifier {expression}) ;
//! list := '[' {expression} ']' ;
//! string := '"' {<any character>} '"' ;
//! number := ['+' | '-'] <any valid floating point number literal> ;
//! ```
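//!
//! As an illustration, here is a small program the grammar above accepts
//! (`FORWARD` and `RIGHT` are assumed turtle builtins, not defined in this
//! module):
//!
//! ```text
//! LEARN SQUARE :size DO
//!     REPEAT 4 DO
//!         FORWARD :size
//!         RIGHT 90
//!     END
//! END
//! ```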
pub mod ast;
use super::lex::{Token, MetaToken};
use self::ast::{Node, AddOp, MulOp, CompOp};
use self::ast::Node::*;
use std::collections::{HashMap, VecDeque};
use std::{error, fmt};
/// A `FuncMap` maps the name of a function to the number of arguments it takes
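///
/// A minimal sketch of seeding it with builtin arities (the names and counts
/// below are illustrative assumptions, not part of this module):
///
/// ```text
/// let mut functions = FuncMap::new();
/// functions.insert("FORWARD".to_owned(), 1);
/// functions.insert("SETPOS".to_owned(), 2);
/// ```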
pub type FuncMap = HashMap<String, i32>;
/// A `Parser` builds an AST from the given input token stream.
pub struct Parser {
tokens: VecDeque<MetaToken>,
scope_stack: Vec<Scope>,
last_line: u32,
}
#[derive(Debug)]
pub enum ParseErrorKind {
UnexpectedToken(&'static str, Token),
UnexpectedEnd,
UnknownFunction(String),
}
impl fmt::Display for ParseErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use self::ParseErrorKind::*;
match *self {
UnexpectedToken(expected, ref got) => {
try!(fmt.pad("unexpected token, expected '"));
try!(fmt.pad(expected));
try!(fmt.pad("', got '"));
try!(got.fmt(fmt));
fmt.pad("'")
},
UnexpectedEnd => fmt.pad("unexpected end"),
UnknownFunction(ref name) => {
try!(fmt.pad("unknown function: "));
name.fmt(fmt)
}
}
}
}
// Error returns are pretty long anyway
use self::ParseErrorKind::*;
#[derive(Debug)]
pub struct ParseError {
line_number: u32,
kind: ParseErrorKind,
}
impl fmt::Display for ParseError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let text = format!("Error in line {}: {}", self.line_number, self.kind);
fmt.pad(&text)
}
}
impl error::Error for ParseError {
fn description(&self) -> &str {
match self.kind {
UnexpectedToken(..) => "unexpected token",
UnexpectedEnd => "unexpected end",
UnknownFunction(..) => "unknown function",
}
}
}
pub type ParseResult = Result<Node, ParseError>;
#[derive(Debug, Clone)]
struct Scope {
functions: FuncMap,
}
impl Scope {
pub fn new() -> Scope {
Scope {
functions: FuncMap::new(),
}
}
}
/// Always returns an `Err` value but attaches the required meta information
/// (such as line number)
macro_rules! parse_error {
($s:expr, $k:expr) => {
{
// This is a very dirty hack to make clippy shut up about "needless return"
// we can't just omit return here since the macro may be used to exit a
// function early.
        // The "if true" should be optimized away, but it's enough to make rustc and
// clippy happy. And if they're happy, I am too.
if true {
return Err(ParseError {
line_number: $s.last_line,
kind: $k,
})
};
unreachable!("parse_error goofed, true no longer considered true")
}
}
}
macro_rules! expect {
($s:expr, $t:path) => {
{
let token = try!($s.pop_left());
match token {
$t => (),
_ => parse_error!($s, UnexpectedToken(stringify!($t), token)),
}
}
}
}
impl Parser {
/// Construct a new `Parser`, consuming the given tokens.
pub fn new(tokens: VecDeque<MetaToken>, functions: FuncMap) -> Parser {
let global_scope = Scope {
functions: functions,
};
Parser {
tokens: tokens,
scope_stack: vec![global_scope],
last_line: 0,
}
}
/// Attempt to return the root node
pub fn parse(&mut self) -> ParseResult {
self.parse_statement_list()
}
fn current_scope_mut(&mut self) -> &mut Scope {
self.scope_stack.last_mut().expect("scope_stack is empty, should have global scope")
}
fn push_scope(&mut self) {
self.scope_stack.push(Scope::new())
}
fn pop_scope(&mut self) {
debug_assert!(self.scope_stack.len() > 1, "Trying to pop global scope");
self.scope_stack.pop().expect("scope_stack is empty, should have global scope");
}
fn find_function_arg_count(&self, name: &str) -> Option<i32> {
for scope in self.scope_stack.iter().rev() {
let function_map = &scope.functions;
match function_map.get(name) {
Some(i) => return Some(*i),
None => {},
}
}
None
}
fn peek(&self) -> Token {
self.tokens.front().unwrap().token.clone()
}
fn pop_left(&mut self) -> Result<Token, ParseError> {
if let Some(meta) = self.tokens.pop_front() {
self.last_line = meta.line_number;
Ok(meta.token)
} else {
parse_error!(self, UnexpectedEnd)
}
}
fn parse_statement_list(&mut self) -> ParseResult {
let mut statements = Vec::new();
while !self.tokens.is_empty() {
let statement = try!(self.parse_statement());
statements.push(statement);
}
Ok(StatementList(statements))
}
fn parse_loop_body(&mut self) -> ParseResult {
// Loop bodies generally introduce new scopes
self.push_scope();
let mut statements = Vec::new();
while !self.tokens.is_empty() {
match self.peek() {
Token::KeyElse | Token::KeyEnd => break,
_ => {
statements.push(try!(self.parse_statement()));
},
}
}
self.pop_scope();
Ok(StatementList(statements))
}
fn parse_statement(&mut self) -> ParseResult {
let token = self.peek();
match token {
Token::KeyLearn => self.parse_learn_stmt(),
Token::KeyIf => self.parse_if_stmt(),
Token::KeyRepeat => self.parse_repeat_stmt(),
Token::KeyWhile => self.parse_while_stmt(),
Token::KeyReturn => self.parse_return_stmt(),
Token::KeyTry => self.parse_try_stmt(),
_ => self.parse_expression(),
}
}
fn parse_learn_stmt(&mut self) -> ParseResult {
expect!(self, Token::KeyLearn);
let name = match try!(self.pop_left()) {
Token::Word(string) => string.to_uppercase(),
token => parse_error!(self, UnexpectedToken("Token::Word", token)),
};
let mut variables = Vec::new();
while !self.tokens.is_empty() {
match try!(self.pop_left()) {
Token::Colon => {
match try!(self.pop_left()) {
Token::Word(s) => variables.push(s),
token => parse_error!(self, UnexpectedToken("Token::Word", token)),
}
},
Token::KeyDo => break,
token => parse_error!(self, UnexpectedToken("Token::KeyDo", token)),
}
}
// We need the argument count for this function if it appears later
// during the parsing stage (e.g. in a recursive call)
self.current_scope_mut().functions.insert(name.clone(), variables.len() as i32);
let statements = try!(self.parse_loop_body());
expect!(self, Token::KeyEnd);
Ok(LearnStatement(name, variables, Box::new(statements)))
}
fn parse_if_stmt(&mut self) -> ParseResult {
expect!(self, Token::KeyIf);
let condition = Box::new(try!(self.parse_expression()));
expect!(self, Token::KeyDo);
let true_body = Box::new(try!(self.parse_loop_body()));
let false_body = if let Token::KeyElse = self.peek() {
try!(self.pop_left());
Some(Box::new(try!(self.parse_loop_body())))
} else { None };
expect!(self, Token::KeyEnd);
Ok(IfStatement(condition, true_body, false_body))
}
fn parse_repeat_stmt(&mut self) -> ParseResult {
expect!(self, Token::KeyRepeat);
let number = Box::new(try!(self.parse_expression()));
expect!(self, Token::KeyDo);
let body = try!(self.parse_loop_body());
expect!(self, Token::KeyEnd);
Ok(RepeatStatement(number, Box::new(body)))
}
fn parse_while_stmt(&mut self) -> ParseResult {
expect!(self, Token::KeyWhile);
let condition = Box::new(try!(self.parse_expression()));
expect!(self, Token::KeyDo);
let body = try!(self.parse_loop_body());
expect!(self, Token::KeyEnd);
Ok(WhileStatement(condition, Box::new(body)))
}
fn parse_return_stmt(&mut self) -> ParseResult {
expect!(self, Token::KeyReturn);
let result = Box::new(try!(self.parse_expression()));
Ok(ReturnStatement(result))
}
fn parse_try_stmt(&mut self) -> ParseResult {
expect!(self, Token::KeyTry);
let normal = Box::new(try!(self.parse_loop_body()));
expect!(self, Token::KeyElse);
let exception = Box::new(try!(self.parse_loop_body()));
expect!(self, Token::KeyEnd);
Ok(TryStatement(normal, exception))
}
fn parse_expression(&mut self) -> ParseResult {
self.parse_comparison()
}
fn parse_comparison(&mut self) -> ParseResult {
let operand = try!(self.parse_expr());
if self.tokens.is_empty() {
return Ok(operand);
};
match self.peek() {
Token::OpEq | Token::OpLt | Token::OpGt |
Token::OpLe | Token::OpGe | Token::OpNe => {
let op = match try!(self.pop_left()) {
Token::OpEq => CompOp::Equal,
Token::OpLt => CompOp::Less,
Token::OpGt => CompOp::Greater,
Token::OpLe => CompOp::LessEqual,
Token::OpGe => CompOp::GreaterEqual,
Token::OpNe => CompOp::NotEqual,
_ => unreachable!(),
};
let operand_right = Box::new(try!(self.parse_expr()));
Ok(Comparison(Box::new(operand), op, operand_right))
}
_ => Ok(operand),
}
}
fn parse_expr(&mut self) -> ParseResult {
let product = Box::new(try!(self.parse_product()));
let mut addends = Vec::new();
while !self.tokens.is_empty() {
match self.peek() {
Token::OpPlus | Token::OpMinus => {
let op = match try!(self.pop_left()) {
Token::OpPlus => AddOp::Add,
Token::OpMinus => AddOp::Sub,
_ => unreachable!(),
};
addends.push((op, try!(self.parse_product())));
},
_ => break,
}
}
Ok(Addition(product, addends))
}
fn parse_product(&mut self) -> ParseResult {
let factor = Box::new(try!(self.parse_factor()));
let mut factors = Vec::new();
while !self.tokens.is_empty() {
match self.peek() {
Token::OpMul | Token::OpDiv => {
let op = match try!(self.pop_left()) {
Token::OpMul => MulOp::Mul,
Token::OpDiv => MulOp::Div,
_ => unreachable!(),
};
factors.push((op, try!(self.parse_factor())));
},
_ => break,
}
}
Ok(Multiplication(factor, factors))
}
fn parse_factor(&mut self) -> ParseResult {
if self.tokens.is_empty() {
parse_error!(self, UnexpectedEnd);
};
match try!(self.pop_left()) {
Token::LParens => {
let factor = try!(self.parse_expression());
expect!(self, Token::RParens);
Ok(factor)
},
Token::LBracket => {
let mut list = Vec::new();
while !self.tokens.is_empty() {
if let Token::RBracket = self.peek() {
break
}
list.push(try!(self.parse_expression()));
}
expect!(self, Token::RBracket);
Ok(List(list))
},
            Token::Colon => {
                match try!(self.pop_left()) {
                    Token::Word(name) => {
                        if !self.tokens.is_empty() {
                            if let Token::OpDefine = self.peek() {
                                try!(self.pop_left());
                                let value = try!(self.parse_expression());
                                return Ok(Assignment(name, Box::new(value)));
                            }
                        }
                        Ok(Variable(name))
                    },
                    // report the offending token itself; popping again here
                    // would blame the token *after* the bad one
                    token => parse_error!(self, UnexpectedToken("Token::Word", token)),
                }
            },
// A function call
Token::Word(name) => {
let argument_count = match self.find_function_arg_count(&name.to_uppercase()) {
Some(i) => i,
None => parse_error!(self, UnknownFunction(name)),
};
let mut arguments = Vec::new();
for _ in 0..argument_count {
arguments.push(try!(self.parse_expression()));
}
Ok(FuncCall(name, arguments))
},
Token::String(string) => Ok(StringLiteral(string)),
Token::Number(num) => Ok(Number(num)),
// Unary prefixes for numbers
Token::OpMinus => {
match try!(self.pop_left()) {
Token::Number(num) => Ok(Number(-num)),
token => parse_error!(self, UnexpectedToken("Token::Number", token)),
}
},
Token::OpPlus => {
match try!(self.pop_left()) {<|fim▁hole|> }
},
token => parse_error!(self, UnexpectedToken("expression", token)),
}
}
}<|fim▁end|>
|
Token::Number(num) => Ok(Number(num)),
token => parse_error!(self, UnexpectedToken("Token::Number", token)),
|
<|file_name|>exception.go<|end_file_name|><|fim▁begin|>// package clause and import added for completeness; the package name is assumed
package main

import "os"

var user = os.Getenv("USER")
func init() {
if user == "" {
panic("no value for $USER")
}
}
func throwsPanic(f func()) (b bool) {
<|fim▁hole|>
		if x := recover(); x != nil {
			b = true
}
}()
f()
return
}<|fim▁end|>
|
defer func() {
|
<|file_name|>submit_fastqc.py<|end_file_name|><|fim▁begin|>from gscripts import qtools
import sys, os
if not os.path.exists("fastqc/"):
os.mkdir("fastqc")
cmds = []
Sub = qtools.Submitter()
for fileName in sys.argv[1:]:
fastqc_command = "fastqc -o fastqc %s" %fileName<|fim▁hole|> cmds.append(fastqc_command)
Sub.job(command_list=cmds, sh_file="runFastqc.sh", job_name="Fastqc", array=True, queue="home", nodes=1, ppn=1, submit=True, max_running=1000)<|fim▁end|>
| |
<|file_name|>test_templates.py<|end_file_name|><|fim▁begin|># Copyright (C) 2011-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Testing i18n template search and interpolation."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
]
import os
import shutil
import tempfile
import unittest
from pkg_resources import resource_filename
from zope.component import getUtility
from mailman.app.lifecycle import create_list
from mailman.config import config
from mailman.interfaces.languages import ILanguageManager
from mailman.testing.layers import ConfigLayer
from mailman.utilities.i18n import TemplateNotFoundError, find, make, search
class TestSearchOrder(unittest.TestCase):
"""Test internal search order for language templates."""
layer = ConfigLayer
def setUp(self):
self.var_dir = tempfile.mkdtemp()
config.push('no template dir', """\
[mailman]
default_language: fr
[paths.testing]
var_dir: {0}
""".format(self.var_dir))
language_manager = getUtility(ILanguageManager)
language_manager.add('de', 'utf-8', 'German')
language_manager.add('it', 'utf-8', 'Italian')
self.mlist = create_list('[email protected]')
self.mlist.preferred_language = 'de'
def tearDown(self):
config.pop('no template dir')
shutil.rmtree(self.var_dir)
def _stripped_search_order(self, template_file,
mailing_list=None, language=None):
# Return the search path order for a given template, possibly using
# the mailing list and the language as context. Note that this only
# returns the search path, and does not check for whether the paths
# exist or not.
#
# Replace the tempdir prefix with a placeholder for more readable and
# reproducible tests. Essentially the paths below are rooted at
# $var_dir, except those files that live within Mailman's source
# tree. The former will use /v/ as the root and the latter will use
# /m/ as the root.
in_tree = os.path.dirname(resource_filename('mailman', 'templates'))
raw_search_order = search(template_file, mailing_list, language)
for path in raw_search_order:
if path.startswith(self.var_dir):
path = '/v' + path[len(self.var_dir):]
elif path.startswith(in_tree):
path = '/m' + path[len(in_tree):]
else:
# This will cause tests to fail, so keep the full bogus
# pathname for better debugging.
pass
yield path
def test_fully_specified_search_order(self):
search_order = self._stripped_search_order('foo.txt', self.mlist, 'it')
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use the given language argument
nexteq('/v/templates/lists/[email protected]/it/foo.txt')
nexteq('/v/templates/domains/example.com/it/foo.txt')
nexteq('/v/templates/site/it/foo.txt')
# 2: Use mlist.preferred_language
nexteq('/v/templates/lists/[email protected]/de/foo.txt')
nexteq('/v/templates/domains/example.com/de/foo.txt')
nexteq('/v/templates/site/de/foo.txt')
# 3: Use the site's default language
nexteq('/v/templates/lists/[email protected]/fr/foo.txt')
nexteq('/v/templates/domains/example.com/fr/foo.txt')
nexteq('/v/templates/site/fr/foo.txt')
# 4: English
nexteq('/v/templates/lists/[email protected]/en/foo.txt')
nexteq('/v/templates/domains/example.com/en/foo.txt')
nexteq('/v/templates/site/en/foo.txt')
# 5: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
def test_no_language_argument_search_order(self):
search_order = self._stripped_search_order('foo.txt', self.mlist)
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use mlist.preferred_language
nexteq('/v/templates/lists/[email protected]/de/foo.txt')
nexteq('/v/templates/domains/example.com/de/foo.txt')
nexteq('/v/templates/site/de/foo.txt')
# 2: Use the site's default language
nexteq('/v/templates/lists/[email protected]/fr/foo.txt')
nexteq('/v/templates/domains/example.com/fr/foo.txt')
nexteq('/v/templates/site/fr/foo.txt')
# 3: English
nexteq('/v/templates/lists/[email protected]/en/foo.txt')
nexteq('/v/templates/domains/example.com/en/foo.txt')
nexteq('/v/templates/site/en/foo.txt')
# 4: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
def test_no_mailing_list_argument_search_order(self):
search_order = self._stripped_search_order('foo.txt', language='it')
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use the given language argument
nexteq('/v/templates/site/it/foo.txt')
# 2: Use the site's default language
nexteq('/v/templates/site/fr/foo.txt')
# 3: English
nexteq('/v/templates/site/en/foo.txt')
# 4: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
def test_no_optional_arguments_search_order(self):
search_order = self._stripped_search_order('foo.txt')
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use the site's default language
nexteq('/v/templates/site/fr/foo.txt')
# 2: English
nexteq('/v/templates/site/en/foo.txt')
# 3: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
class TestFind(unittest.TestCase):
"""Test template search."""
layer = ConfigLayer
def setUp(self):
self.var_dir = tempfile.mkdtemp()
config.push('template config', """\
[paths.testing]
var_dir: {0}
""".format(self.var_dir))
# The following MUST happen AFTER the push() above since pushing a new
# config also clears out the language manager.
getUtility(ILanguageManager).add('xx', 'utf-8', 'Xlandia')
self.mlist = create_list('[email protected]')
self.mlist.preferred_language = 'xx'
self.fp = None
# Populate the template directories with a few fake templates.
def write(text, path):
os.makedirs(os.path.dirname(path))
with open(path, 'w') as fp:
fp.write(text)
self.xxsite = os.path.join(
self.var_dir, 'templates', 'site', 'xx', 'site.txt')
write('Site template', self.xxsite)
self.xxdomain = os.path.join(
self.var_dir, 'templates',
'domains', 'example.com', 'xx', 'domain.txt')
write('Domain template', self.xxdomain)
self.xxlist = os.path.join(
self.var_dir, 'templates',
'lists', '[email protected]', 'xx', 'list.txt')
write('List template', self.xxlist)
def tearDown(self):
if self.fp is not None:
self.fp.close()
config.pop('template config')
shutil.rmtree(self.var_dir)
def test_find_site_template(self):
filename, self.fp = find('site.txt', language='xx')
self.assertEqual(filename, self.xxsite)
self.assertEqual(self.fp.read(), 'Site template')
def test_find_domain_template(self):
filename, self.fp = find('domain.txt', self.mlist)
self.assertEqual(filename, self.xxdomain)
self.assertEqual(self.fp.read(), 'Domain template')
def test_find_list_template(self):
filename, self.fp = find('list.txt', self.mlist)
self.assertEqual(filename, self.xxlist)
self.assertEqual(self.fp.read(), 'List template')
def test_template_not_found(self):
with self.assertRaises(TemplateNotFoundError) as cm:
find('missing.txt', self.mlist)
self.assertEqual(cm.exception.template_file, 'missing.txt')
<|fim▁hole|>
layer = ConfigLayer
def setUp(self):
self.var_dir = tempfile.mkdtemp()
config.push('template config', """\
[paths.testing]
var_dir: {0}
""".format(self.var_dir))
# The following MUST happen AFTER the push() above since pushing a new
# config also clears out the language manager.
getUtility(ILanguageManager).add('xx', 'utf-8', 'Xlandia')
self.mlist = create_list('[email protected]')
self.mlist.preferred_language = 'xx'
# Populate the template directories with a few fake templates.
path = os.path.join(self.var_dir, 'templates', 'site', 'xx')
os.makedirs(path)
with open(os.path.join(path, 'nosub.txt'), 'w') as fp:
print("""\
This is a global template.
It has no substitutions.
It will be wrapped.
""", file=fp)
with open(os.path.join(path, 'subs.txt'), 'w') as fp:
print("""\
This is a $kind template.
It has $howmany substitutions.
It will be wrapped.
""", file=fp)
with open(os.path.join(path, 'nowrap.txt'), 'w') as fp:
print("""\
This is a $kind template.
It has $howmany substitutions.
It will not be wrapped.
""", file=fp)
def tearDown(self):
config.pop('template config')
shutil.rmtree(self.var_dir)
def test_no_substitutions(self):
self.assertEqual(make('nosub.txt', self.mlist), """\
This is a global template. It has no substitutions. It will be
wrapped.""")
def test_substitutions(self):
self.assertEqual(make('subs.txt', self.mlist,
kind='very nice',
howmany='a few'), """\
This is a very nice template. It has a few substitutions. It will be
wrapped.""")
def test_substitutions_no_wrap(self):
self.assertEqual(make('nowrap.txt', self.mlist, wrap=False,
kind='very nice',
howmany='a few'), """\
This is a very nice template.
It has a few substitutions.
It will not be wrapped.
""")<|fim▁end|>
|
class TestMake(unittest.TestCase):
"""Test template interpolation."""
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/*! Provides packet encoding and decoding functionality, as well as the packet enum.
This module does not handle the checksum. If it did, it would be incredibly difficult to write Fastnet tests.*/
pub use self::encoder::*;
pub use self::decoder::*;
use uuid;
use std::cmp;
mod encoder;
mod encoder_tests;
mod decoder;
mod decoder_tests;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Packet {
//Status request and response (channel -1)
StatusRequest(StatusRequest),
StatusResponse(StatusResponse),
//Connection handshake (also channel -1).
Connect(uuid::Uuid),
Connected(uuid::Uuid),
Aborted(String),
//Heartbeat (channel -2).
Heartbeat{counter: u64, sent: u64, received: u64},
Echo{endpoint: uuid::Uuid, uuid: uuid::Uuid},
Data{chan: i16, packet: DataPacket},
Ack{chan: i16, sequence_number: u64}
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum StatusRequest {
FastnetQuery,
VersionQuery,
ExtensionQuery(String),
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum StatusResponse {
FastnetResponse(bool),
VersionResponse(String),
ExtensionResponse {name: String, supported: bool},
}
pub const CONNECTION_CHANNEL: i16 = -1;
pub const HEARTBEAT_CHANNEL: i16 = -2;
pub const ECHO_CHANNEL: i16 = -3;
pub const STATUS_REQUEST_SPECIFIER: u8 = 0;
pub const STATUS_RESPONSE_SPECIFIER: u8 = 1;
pub const CONNECT_SPECIFIER: u8 = 2;
pub const CONNECTED_SPECIFIER: u8 = 3;
pub const ABORTED_SPECIFIER: u8 = 4;
//These are used both for query and response.
pub const STATUS_FASTNET_SPECIFIER: u8 = 0;
pub const STATUS_VERSION_SPECIFIER: u8 = 1;
pub const STATUS_EXTENSION_SPECIFIER: u8 = 2;
//Flag bit positions for data packets (shifted into the flags byte with
//`1 << BIT`), used in the impl of the struct.
pub const DATA_FRAME_START_BIT: u8 = 0;
pub const DATA_FRAME_END_BIT: u8 = 1;
pub const DATA_RELIABLE_BIT: u8 = 2;
pub const DATA_PACKET_SPECIFIER: u8 = 0;
pub const ACK_PACKET_SPECIFIER: u8 = 1;
pub const FRAME_HEADER_SIZE: usize = 12; // 64-bit last-reliable-frame number and 32-bit length.
/**Represents the part of a data packet that a channel must use to assemble packets.
The actual channel itself is stored in the enum variant.
These are ordered by sequence number, for use in trees.*/
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct DataPacket {
sequence_number: u64,
flags: u8,
payload: Vec<u8>,
header: Option<FrameHeader>
}
//It would be nice to put this somewhere else, but we unfortunately can't.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Default)]
pub struct FrameHeader {
pub last_reliable_frame: u64,
pub length: u32,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct DataPacketBuilder {
sequence_number: u64,
is_reliable: bool,
is_frame_start: bool,
is_frame_end: bool,
payload: Vec<u8>,
header: Option<FrameHeader>,
}
impl DataPacketBuilder {
/**Initial state is unreliable, mid-frame, and empty payload.*/
pub fn new(sequence_number: u64)->DataPacketBuilder {
DataPacketBuilder::with_payload(sequence_number, Vec::default())
}
/**Makes a packet with the specified payload, no header, and all flags cleared.*/
pub fn with_payload(sequence_number: u64, payload: Vec<u8>)->DataPacketBuilder {
DataPacketBuilder::with_payload_and_header(sequence_number, payload, None)
}
/**Configures the builder for mid-frame and using the specified header.
If a header is provided, the packet automatically has its first flag set.*/
pub fn with_payload_and_header(sequence_number: u64, payload: Vec<u8>, header: Option<FrameHeader>)->DataPacketBuilder {
DataPacketBuilder {
sequence_number: sequence_number,
is_reliable: false,
is_frame_start: header.is_some(),
is_frame_end: false,
payload: payload,
header: header,
}
}
pub fn set_payload(mut self, payload: Vec<u8>)->Self {
self.payload = payload;
self
}
    pub fn set_header(mut self, header: Option<FrameHeader>)->Self {
        self.header = header;
        // a header implies the packet starts a frame, matching
        // `with_payload_and_header` and the invariant checked in `build`
        self.is_frame_start = self.header.is_some();
        self
    }
pub fn set_reliable(mut self, reliable: bool)->Self {
self.is_reliable = reliable;
self<|fim▁hole|> pub fn set_frame_start(mut self, start: bool)->Self {
self.is_frame_start = start;
self
}
pub fn set_frame_end(mut self, end: bool)->Self {
self.is_frame_end = end;
self
}
pub fn set_sequence_number(mut self, sequence_number: u64)->Self {
self.sequence_number = sequence_number;
self
}
/**Panics if the packet is invalid. Building invalid packets is a bug.*/
pub fn build(self)->DataPacket {
if self.is_frame_start != self.header.is_some() {
panic!("Header and start flag mismatch. Start flag = {:?}, header = {:?}", self.is_frame_start, self.header);
}
let start_flag = (self.is_frame_start as u8) << DATA_FRAME_START_BIT;
let end_flag = (self.is_frame_end as u8) << DATA_FRAME_END_BIT;
let reliable_flag = (self.is_reliable as u8) << DATA_RELIABLE_BIT;
let flags = start_flag | end_flag | reliable_flag;
DataPacket {
sequence_number: self.sequence_number,
flags: flags,
payload: self.payload,
header: self.header,
}
}
}
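
// A minimal usage sketch of the builder (the concrete values below are
// illustrative, not taken from the real protocol tests):
#[cfg(test)]
mod builder_example {
    use super::*;

    #[test]
    fn builds_a_reliable_single_packet_frame() {
        let packet = DataPacketBuilder::new(42)
            .set_payload(vec![1, 2, 3])
            // attaching a header marks the packet as a frame start
            .set_header(Some(FrameHeader::new(41, 3)))
            .set_reliable(true)
            .set_frame_end(true)
            .build();
        assert!(packet.is_reliable());
        assert!(packet.is_frame_start());
        assert!(packet.is_frame_end());
        assert_eq!(packet.sequence_number(), 42);
        assert_eq!(packet.borrow_payload(), &vec![1, 2, 3]);
    }
}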
impl DataPacket {
    pub fn is_reliable(&self)->bool {
        // the constants are bit positions, so shift before masking
        (self.flags & (1 << DATA_RELIABLE_BIT)) > 0
    }

    pub fn is_frame_start(&self)->bool {
        (self.flags & (1 << DATA_FRAME_START_BIT)) > 0
    }

    pub fn is_frame_end(&self)->bool {
        (self.flags & (1 << DATA_FRAME_END_BIT)) > 0
    }
pub fn sequence_number(&self)->u64 {
self.sequence_number
}
pub fn borrow_header(&self)->Option<&FrameHeader> {
self.header.as_ref()
}
pub fn get_header(&self)->Option<FrameHeader> {
self.header
}
pub fn borrow_payload(&self)->&Vec<u8> {
&self.payload
}
pub fn into_payload(self)->Vec<u8> {
self.payload
}
}
impl FrameHeader {
pub fn new(last_reliable_frame: u64, length: u32)->FrameHeader {
FrameHeader{last_reliable_frame: last_reliable_frame, length: length}
}
}<|fim▁end|>
|
}
|
<|file_name|>sub.rs<|end_file_name|><|fim▁begin|>use super::song_info::SongInfo;
use super::pos::{RowPosition, Point};
use super::{Sentence, SentenceOptions, SentenceParameters, Syllable, SyllableOptions,
SyllableParameters, AsSentenceOptions, AsSyllableOptions};
use ::overlay::*;
use ::overlay::pos::*;
use std::ops::Deref;
use ::utils::*;
#[derive(Debug,Default,Serialize,Deserialize,Clone)]
pub struct Subtitles {
pub sentences: Vec<Sentence>,
#[serde(default)]
pub subtitles_options: SubtitlesOptions,
#[serde(default)]
pub song_info: SongInfo,
}
/// sentences : sentences already stored, split into (before, after) slices
/// sentence : the Sentence whose row position is being chosen
fn set_best_sentence_row(sentences: (&[Sentence], &[Sentence]),
sentence: &mut Sentence,
default_sentence_options: Option<&SentenceOptions>) {
if let Some(row_pos) = sentence.sentence_options.as_ref().and_then(|o| o.row_position) {
sentence.position = row_pos;
return; // life is meaningless
}
let (before_sentences, after_sentences) = sentences;
let sentence_options: Option<SentenceOptions> = sentence.sentence_options
.or_sentence_options(default_sentence_options);
let sentence_parameters =
SentenceParameters::from((sentence_options.unwrap_or(SentenceOptions::default()),
-10000i32));
let mut best_row = 0u8;<|fim▁hole|> sentence.syllables.last(),
sentence_candidate.syllables.first(),
sentence_candidate.syllables.last()) {
(None, _, _, _) | (_, None, _, _) | (_, _, None, _) | (_, _, _, None) => false,
(Some(ref first_syllable),
Some(ref last_syllable),
Some(ref first_syllable_candidate),
Some(ref last_syllable_candidate)) => {
let sentence_candidate_options: Option<SentenceOptions> =
sentence_candidate.sentence_options
.or_sentence_options(default_sentence_options);
let sentence_candidate_parameters =
SentenceParameters::from(
(sentence_candidate_options.unwrap_or(SentenceOptions::default()),-10000i32));
let first_frame = first_syllable.begin
.saturating_sub(sentence_parameters.transition_time_before as u32);
let last_frame = last_syllable.end
.expect("last syllable has no end")
.saturating_add(sentence_parameters.transition_time_after as u32);
let first_frame_candidate = first_syllable_candidate.begin
.saturating_sub(sentence_candidate_parameters.transition_time_before as u32);
let last_frame_candidate = last_syllable_candidate.end
.expect("last syllable has no end")
.saturating_add(sentence_candidate_parameters.transition_time_after as u32);
                        // true iff the two on-screen display intervals overlap
                        (last_frame_candidate >= first_frame &&
                         last_frame_candidate <= last_frame) ||
                        (first_frame_candidate >= first_frame &&
                         first_frame_candidate <= last_frame) ||
                        (last_frame >= first_frame_candidate &&
                         last_frame <= last_frame_candidate) ||
                        (first_frame >= first_frame_candidate &&
                         first_frame <= last_frame_candidate)
}
}
};
        // TODO remove unwraps
        // step 1: filter -> filter_map to remove "options" and maybe convert
        // directly into RowPosition::Row(_)
let sentences_candidate_before = before_sentences.iter().filter(&filter_fun);
let sentences_candidate_after = after_sentences.iter()
.filter(|s| {
if let Some(ref sentence_options) = s.sentence_options {
sentence_options.row_position.is_some()
} else {
false
}
})
.filter(&filter_fun);
let mut taken = vec![];
for sentence in sentences_candidate_before {
match sentence.position {
RowPosition::Row(i) => {
taken.push(i);
}
_ => {}
}
}
for sentence in sentences_candidate_after {
if let &RowPosition::Row(i) = sentence.sentence_options
.as_ref()
.unwrap()
.row_position
.as_ref()
.unwrap() {
taken.push(i);
};
}
while taken.iter().any(|v| *v == best_row) {
best_row = best_row + 1;
}
}
sentence.position = RowPosition::Row(best_row);
}
impl Subtitles {
pub fn credit_sentences(&self) -> Option<(String, Option<String>)> {
self.song_info.credit_sentences()
}
pub fn check(&self) -> Result<(), String> {
for (s_number, sentence) in self.sentences.iter().enumerate() {
match (sentence.syllables.first(), sentence.syllables.last()) {
(Some(_), Some(&Syllable { end: Some(_), .. })) => {}
(Some(_), Some(&Syllable { end: None, .. })) => {
return Err(format!("Error at sentence {}, no 'end' time for the last syllable",
s_number))
}
_ => {
warn!("Empty sentence {} when checking", s_number);
}
};
}
Ok(())
}
/// length in ms
pub fn post_init(&mut self, duration: u32) {
self.adjust_sentences_row();
        let options = &mut self.subtitles_options;
        let credits_time = 8000;
        if options.credits_time == 0 {
            options.credits_time = credits_time;
        }
        if options.start_credits_time == 0 {
            options.start_credits_time = 4000;
        }
        if options.end_credits_time == 0 {
            options.end_credits_time = duration.saturating_sub(4000 + credits_time);
        }
}
fn adjust_sentences_row(&mut self) {
for i in 0..self.sentences.len() {
let (first_half, mut last_half) = self.sentences.split_at_mut(i);
let (mut middle, last_half) = last_half.split_first_mut().unwrap();
set_best_sentence_row((first_half, last_half),
middle,
self.subtitles_options.as_sentence_options());
}
}
// TODO create a subtitles::Error type and replace String with this
pub fn to_overlay_frame(&self, current_time: u32) -> Result<OverlayFrame, String> {
let mut text_units: Vec<TextUnit> = vec![];
let default_sentence_options: Option<&SentenceOptions> = self.subtitles_options
.as_sentence_options();
let sentence_iter = self.sentences.iter().enumerate().filter(|&(_, ref sentence)| {
let sentence_options: Option<SentenceOptions> = sentence.sentence_options
.or_sentence_options(default_sentence_options);
let sentence_parameters =
SentenceParameters::from((sentence_options.unwrap_or(SentenceOptions::default()),
current_time as i32 -
sentence.syllables.first().unwrap().begin as i32));
match (sentence.syllables.first(), sentence.syllables.last()) {
(None, _) | (_, None) => false,
(Some(&Syllable { begin: first_syllable_begin, .. }),
Some(&Syllable { end: Some(last_syllable_end), .. })) => {
let first_frame = first_syllable_begin
.saturating_sub(sentence_parameters.transition_time_before as u32);
let last_frame = last_syllable_end
.saturating_add(sentence_parameters.transition_time_after as u32);
                    current_time >= first_frame && current_time <= last_frame
}
_ => panic!("Subtitles have not been checked"),
}
}); // get all the sentences displayed on screen
for (_sentence_number, ref sentence) in sentence_iter {
let cur_offset: i32 = current_time as i32 -
sentence.syllables.first().unwrap().begin as i32;
let sentence_alpha =
compute_sentence_alpha(sentence, default_sentence_options, current_time);
let mut text_elts = vec![];
let mut logo_position: Option<u16> = None;
let sentence_options: Option<SentenceOptions> =
sentence.or_sentence_options(default_sentence_options);
let default_syllable_options: Option<SyllableOptions> =
sentence_options.as_syllable_options(cur_offset);
let sentence_params =
SentenceParameters::from((sentence_options.unwrap_or(Default::default()),
cur_offset));
{
for tmp_syllables in sentence.syllables.windows(2) {
let (syllable1, syllable2) = (&tmp_syllables[0], &tmp_syllables[1]);
if !syllable1.text.is_empty() {
add_syllable(&mut text_elts,
syllable1,
Some(syllable2),
default_syllable_options.as_ref(),
current_time,
sentence_alpha);
}
}
match sentence.syllables.last() {
Some(last_syllable) => {
add_syllable(&mut text_elts,
last_syllable,
None,
default_syllable_options.as_ref(),
current_time,
sentence_alpha);
}
_ => {}
}
}
if sentence_params.display_logo {
'syllables: for (n, syllable) in sentence.syllables.iter().enumerate() {
if current_time >= syllable.begin {
logo_position = Some(n as u16);
} else {
break 'syllables;
}
}
match sentence.syllables.last() {
Some(ref syllable) => {
                    if current_time > syllable.end.unwrap() {
logo_position = None;
}
}
None => {}
}
match logo_position {
Some(logo_position) => {
match text_elts.get_mut(logo_position as usize) {
Some(ref mut text_elt) => {
text_elt.attach_logo = true;
}
None => error!("Unexpected None in getting from logo_position !"),
}
}
None => {}
}
}
let text_pos = match sentence_params.row_position.unwrap_or(sentence.position) {
RowPosition::Row(l) => {
(PosX::Centered, PosY::FromTopPercent(l as f32 * 0.15 + 0.01))
}
RowPosition::ForcePos(Point { x, y }) => {
(PosX::FromLeftPercent(x), PosY::FromTopPercent(y))
}
};
let text_unit = TextUnit {
text: text_elts,
size: Size::FitPercent(sentence_params.size.width, sentence_params.size.height),
pos: text_pos,
anchor: (0.5, 0.0),
};
text_units.push(text_unit);
}
Ok(OverlayFrame { text_units: text_units })
}
}
impl Deref for SubtitlesOptions {
type Target = Option<SentenceOptions>;
fn deref(&self) -> &Option<SentenceOptions> {
&self.sentence_options
}
}
#[derive(Debug,Default,Clone,Serialize,Deserialize)]
pub struct SubtitlesOptions {
/// Global SentenceOptions
pub sentence_options: Option<SentenceOptions>,
#[serde(default)]
pub start_credits_time: u32,
#[serde(default)]
pub end_credits_time: u32,
#[serde(default)]
pub credits_time: u32,
}
fn add_syllable(mut text_subunits: &mut Vec<TextSubUnit>,
syllable: &Syllable,
next_syllable: Option<&Syllable>,
default_syllable_options: Option<&SyllableOptions>,
current_frame: u32,
alpha: f32) {
let syllable_end = syllable.end
.or(next_syllable.map(|s| s.begin.saturating_sub(1)))
.expect("File has not been checked properly : end syllable has no end frame");
let syllable_options = syllable.syllable_options.or_syllable_options(default_syllable_options);
let syllable_parameters =
SyllableParameters::from(syllable_options.unwrap_or(SyllableOptions::default()));
let outline = Outline::from(syllable_parameters.outline);
let alive_color = AlphaColor::from(Color::from(syllable_parameters.alive_color));
let transition_color = Color::from(syllable_parameters.transition_color);
let dead_color = Color::from(syllable_parameters.dead_color);
    if current_frame < syllable.begin {
let text_sub_unit = TextSubUnit {
text: syllable.text.clone(),
color: fade_color(alive_color, alpha),
outline: outline,
shadow: None,
attach_logo: false,
};
text_subunits.push(text_sub_unit);
} else if (syllable.begin <= current_frame) && (current_frame <= syllable_end) {
let percent = (current_frame - syllable.begin) as f32 /
(syllable_end - syllable.begin) as f32;
        // let's ease the percent a little (circular ease-in)
let percent = 1.0 - (1.0 - percent * percent).sqrt();
let transition_color = AlphaColor::from(mix_colors(transition_color, dead_color, percent));
let text_sub_unit = TextSubUnit {
text: syllable.text.clone(),
color: transition_color,
outline: outline,
shadow: None,
attach_logo: false,
};
text_subunits.push(text_sub_unit);
} else {
let text_sub_unit = TextSubUnit {
text: syllable.text.clone(),
color: fade_color(AlphaColor::from(dead_color), alpha),
outline: outline,
shadow: None,
attach_logo: false,
};
text_subunits.push(text_sub_unit);
}
}
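
// Rough timeline of the alpha value computed by `compute_sentence_alpha`
// below (a sketch; widths are illustrative, not to scale):
//
//   begin - transition_time_before           end + transition_time_after
//   |-- fade_time_before --|                    |-- fade_time_after --|
//   0 ...... ramp up ..... 1 ----- held at 1 ---1 ..... ramp down ... 0
//
// The linear ramps occupy the first `fade_time_before` frames of the
// leading transition window and the last `fade_time_after` frames of the
// trailing one; outside both windows the sentence is fully transparent.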
fn compute_sentence_alpha(sentence: &Sentence,
default_sentence_options: Option<&SentenceOptions>,
frame_number: u32)
-> f32 {
let sentence_options: Option<SentenceOptions> = sentence.sentence_options
.or_sentence_options(default_sentence_options);
let sentence_parameters =
SentenceParameters::from((sentence_options.unwrap_or(SentenceOptions::default()),
frame_number as i32 -
sentence.syllables.first().unwrap().begin as i32));
match (sentence.syllables.first(), sentence.syllables.last()) {
(Some(&Syllable { begin: frame_begin, .. }),
Some(&Syllable { end: Some(frame_end), .. })) => {
let end_fade_frame_before: u32 =
(sentence_parameters.transition_time_before -
sentence_parameters.fade_time_before) as u32;
let end_fade_frame_after: u32 =
(sentence_parameters.transition_time_after -
sentence_parameters.fade_time_after) as u32;
let begin_first_fade_frame =
frame_begin.saturating_sub(sentence_parameters.transition_time_before as u32);
let end_first_fade_frame = frame_begin.saturating_sub(end_fade_frame_before);
let begin_second_fade_frame = frame_end.saturating_add(end_fade_frame_after);
let end_second_fade_frame =
frame_end.saturating_add(sentence_parameters.transition_time_after as u32);
debug_assert_eq!(end_second_fade_frame - begin_second_fade_frame,
sentence_parameters.fade_time_after as u32);
            if end_first_fade_frame < frame_number && begin_second_fade_frame > frame_number {
1.0
} else if begin_first_fade_frame <= frame_number &&
end_first_fade_frame >= frame_number {
(frame_number - begin_first_fade_frame) as f32 /
(end_first_fade_frame - begin_first_fade_frame) as f32
} else if begin_second_fade_frame <= frame_number &&
end_second_fade_frame >= frame_number {
1.0 -
((frame_number - begin_second_fade_frame) as f32 /
(end_second_fade_frame - begin_second_fade_frame) as f32)
} else {
0.0
}
}
_ => 0.0,
}
}<|fim▁end|>
|
{
let filter_fun = |sentence_candidate: &&Sentence| {
match (sentence.syllables.first(),
|
<|file_name|>integration_test.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2014 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math"
"os"
"path/filepath"
"reflect"
"regexp"
"testing"
"github.com/blevesearch/bleve/v2"
"github.com/blevesearch/bleve/v2/mapping"
// allow choosing alternate kvstores
_ "github.com/blevesearch/bleve/v2/config"
)
var dataset = flag.String("dataset", "", "only test datasets matching this regex")
var onlynum = flag.Int("testnum", -1, "only run the test with this number")
var keepIndex = flag.Bool("keepIndex", false, "keep the index after testing")
var indexType = flag.String("indexType", bleve.Config.DefaultIndexType, "index type to build")
var kvType = flag.String("kvType", bleve.Config.DefaultKVStore, "kv store type to build")
var segType = flag.String("segType", "", "force scorch segment type")
var segVer = flag.Int("segVer", 0, "force scorch segment version")
func TestIntegration(t *testing.T) {
flag.Parse()
t.Logf("using index type %s and kv type %s", *indexType, *kvType)
if *segType != "" {
t.Logf("forcing segment type: %s", *segType)
}
if *segVer != 0 {
t.Logf("forcing segment version: %d", *segVer)<|fim▁hole|> if *dataset != "" {
datasetRegexp, err = regexp.Compile(*dataset)
if err != nil {
t.Fatal(err)
}
}
fis, err := ioutil.ReadDir("tests")
if err != nil {
t.Fatal(err)
}
for _, fi := range fis {
if datasetRegexp != nil {
if !datasetRegexp.MatchString(fi.Name()) {
continue
}
}
if fi.IsDir() {
t.Logf("Running test: %s", fi.Name())
runTestDir(t, "tests"+string(filepath.Separator)+fi.Name(), fi.Name())
}
}
}
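
// Each dataset directory consumed by runTestDir is expected to look roughly
// like this (a sketch inferred from the loaders below; names illustrative):
//
//	tests/<dataset>/mapping.json      index mapping
//	tests/<dataset>/data/*.json       documents for a single index, or
//	tests/<dataset>/datasets/<name>/  per-index document dirs merged via an alias
//	tests/<dataset>/searches.json     searches paired with expected results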
func runTestDir(t *testing.T, dir, datasetName string) {
// read the mapping
mappingBytes, err := ioutil.ReadFile(dir + string(filepath.Separator) + "mapping.json")
if err != nil {
t.Errorf("error reading mapping: %v", err)
return
}
var mapping mapping.IndexMappingImpl
err = json.Unmarshal(mappingBytes, &mapping)
if err != nil {
t.Errorf("error unmarshalling mapping: %v", err)
return
}
var index bleve.Index
var cleanup func()
// if there is a dir named 'data' open single index
_, err = os.Stat(dir + string(filepath.Separator) + "data")
if !os.IsNotExist(err) {
index, cleanup, err = loadDataSet(t, datasetName, mapping, dir+string(filepath.Separator)+"data")
if err != nil {
t.Errorf("error loading dataset: %v", err)
return
}
defer cleanup()
} else {
// if there is a dir named 'datasets' build alias over each index
_, err = os.Stat(dir + string(filepath.Separator) + "datasets")
if !os.IsNotExist(err) {
index, cleanup, err = loadDataSets(t, datasetName, mapping, dir+string(filepath.Separator)+"datasets")
if err != nil {
t.Errorf("error loading dataset: %v", err)
return
}
defer cleanup()
}
}
// read the searches
searchBytes, err := ioutil.ReadFile(dir + string(filepath.Separator) + "searches.json")
if err != nil {
t.Errorf("error reading searches: %v", err)
return
}
var searches SearchTests
err = json.Unmarshal(searchBytes, &searches)
if err != nil {
t.Errorf("error unmarshalling searches: %v", err)
return
}
// run the searches
for testNum, search := range searches {
		// run every test by default (-1); otherwise only the selected test
		// number, including test number 0
		if *onlynum < 0 || testNum == *onlynum {
res, err := index.Search(search.Search)
if err != nil {
t.Errorf("error running search: %v", err)
}
if res.Total != search.Result.Total {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - expected total: %d got %d", testNum, search.Result.Total, res.Total)
continue
}
if len(res.Hits) != len(search.Result.Hits) {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - expected hits len: %d got %d", testNum, len(search.Result.Hits), len(res.Hits))
t.Errorf("got hits: %v", res.Hits)
continue
}
for hi, hit := range search.Result.Hits {
if hit.ID != res.Hits[hi].ID {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - expected hit %d to have ID %s got %s", testNum, hi, hit.ID, res.Hits[hi].ID)
}
if hit.Fields != nil {
if !reflect.DeepEqual(hit.Fields, res.Hits[hi].Fields) {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - expected hit %d to have fields %#v got %#v", testNum, hi, hit.Fields, res.Hits[hi].Fields)
}
}
if hit.Fragments != nil {
if !reflect.DeepEqual(hit.Fragments, res.Hits[hi].Fragments) {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - expected hit %d to have fragments %#v got %#v", testNum, hi, hit.Fragments, res.Hits[hi].Fragments)
}
}
if hit.Locations != nil {
if !reflect.DeepEqual(hit.Locations, res.Hits[hi].Locations) {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - expected hit %d to have locations %#v got %#v", testNum, hi, hit.Locations, res.Hits[hi].Locations)
}
}
// assert that none of the scores were NaN,+Inf,-Inf
if math.IsInf(res.Hits[hi].Score, 0) || math.IsNaN(res.Hits[hi].Score) {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - invalid score %f", testNum, res.Hits[hi].Score)
}
}
if search.Result.Facets != nil {
if !reflect.DeepEqual(search.Result.Facets, res.Facets) {
t.Errorf("test error - %s", search.Comment)
t.Errorf("test %d - expected facets: %#v got %#v", testNum, search.Result.Facets, res.Facets)
}
}
if _, ok := index.(bleve.IndexAlias); !ok {
// check that custom index name is in results
for _, hit := range res.Hits {
if hit.Index != datasetName {
t.Fatalf("expected name: %s, got: %s", datasetName, hit.Index)
}
}
}
}
}
}
func loadDataSet(t *testing.T, datasetName string, mapping mapping.IndexMappingImpl, path string) (bleve.Index, func(), error) {
idxPath := fmt.Sprintf("test-%s.bleve", datasetName)
cfg := map[string]interface{}{}
if *segType != "" {
cfg["forceSegmentType"] = *segType
}
if *segVer != 0 {
cfg["forceSegmentVersion"] = *segVer
}
index, err := bleve.NewUsing(idxPath, &mapping, *indexType, *kvType, cfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating new index: %v", err)
}
// set a custom index name
index.SetName(datasetName)
// index data
fis, err := ioutil.ReadDir(path)
if err != nil {
return nil, nil, fmt.Errorf("error reading data dir: %v", err)
}
for _, fi := range fis {
fileBytes, err := ioutil.ReadFile(path + string(filepath.Separator) + fi.Name())
if err != nil {
return nil, nil, fmt.Errorf("error reading data file: %v", err)
}
var fileDoc interface{}
err = json.Unmarshal(fileBytes, &fileDoc)
if err != nil {
return nil, nil, fmt.Errorf("error parsing data file as json: %v", err)
}
filename := fi.Name()
ext := filepath.Ext(filename)
id := filename[0 : len(filename)-len(ext)]
err = index.Index(id, fileDoc)
if err != nil {
return nil, nil, fmt.Errorf("error indexing data: %v", err)
}
}
cleanup := func() {
err := index.Close()
if err != nil {
t.Fatalf("error closing index: %v", err)
}
if !*keepIndex {
err := os.RemoveAll(idxPath)
if err != nil {
t.Fatalf("error removing index: %v", err)
}
}
}
return index, cleanup, nil
}
func loadDataSets(t *testing.T, datasetName string, mapping mapping.IndexMappingImpl, path string) (bleve.Index, func(), error) {
fis, err := ioutil.ReadDir(path)
if err != nil {
return nil, nil, fmt.Errorf("error reading datasets dir: %v", err)
}
var cleanups []func()
alias := bleve.NewIndexAlias()
for _, fi := range fis {
idx, idxCleanup, err := loadDataSet(t, fi.Name(), mapping, path+string(filepath.Separator)+fi.Name())
if err != nil {
return nil, nil, fmt.Errorf("error loading dataset: %v", err)
}
cleanups = append(cleanups, idxCleanup)
alias.Add(idx)
}
alias.SetName(datasetName)
cleanupAll := func() {
for _, cleanup := range cleanups {
cleanup()
}
}
return alias, cleanupAll, nil
}<|fim▁end|>
|
}
var err error
var datasetRegexp *regexp.Regexp
|
<|file_name|>0006_auto_20160716_1641.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-16 16:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('campaign', '0005_auto_20160716_1624'),
]
operations = [
migrations.AddField(<|fim▁hole|> migrations.AlterField(
model_name='campaign',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='gatewayproperty',
name='value',
field=models.CharField(blank=True, max_length=255, null=True),
),
]<|fim▁end|>
|
model_name='charity',
name='gateway',
field=models.ManyToManyField(through='campaign.GatewayProperty', to='campaign.Gateway'),
),
|
<|file_name|>test.ts<|end_file_name|><|fim▁begin|>/*
* Copyright 2017 Banco Bilbao Vizcaya Argentaria, S.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file is required by karma.conf.js and loads recursively all the .spec and framework files
import 'zone.js/dist/zone-testing';
import { getTestBed } from '@angular/core/testing';
import {
BrowserDynamicTestingModule,
platformBrowserDynamicTesting
} from '@angular/platform-browser-dynamic/testing';
declare const require: any;
// First, initialize the Angular testing environment.
getTestBed().initTestEnvironment(<|fim▁hole|>);
// Then we find all the tests.
const context = require.context('./', true, /\.spec\.ts$/);
// And load the modules.
context.keys().map(context);<|fim▁end|>
|
BrowserDynamicTestingModule,
platformBrowserDynamicTesting()
|
<|file_name|>prediction_metrics_manager_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License<|fim▁hole|>
"""This file invokes predictionmetricsmanager.py tests
TODO: Move these tests to unit test format.
"""
from nupic.frameworks.opf.predictionmetricsmanager import (
test as predictionMetricsManagerTest)
if __name__ == "__main__":
predictionMetricsManagerTest()<|fim▁end|>
|
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
|
<|file_name|>authentication.py<|end_file_name|><|fim▁begin|>import urllib
def basic_authentication(username=None, password=None, protocol="http"):
from .fixtures import server_config, url
build_url = url(server_config())
query = {}
return build_url("/webdriver/tests/support/authentication.py",
query=urllib.urlencode(query),<|fim▁hole|>
def main(request, response):
user = request.auth.username
password = request.auth.password
if user == "user" and password == "password":
return "Authentication done"
realm = "test"
if "realm" in request.GET:
realm = request.GET.first("realm")
return ((401, "Unauthorized"),
[("WWW-Authenticate", 'Basic realm="' + realm + '"')],
"Please login with credentials 'user' and 'password'")<|fim▁end|>
|
protocol=protocol)
|
<|file_name|>AutoUpdateReceiver.java<|end_file_name|><|fim▁begin|>package com.coolweather.app.receiver;
import com.coolweather.app.service.AutoUpdateService;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
public class AutoUpdateReceiver extends BroadcastReceiver{
@Override
public void onReceive(Context context,Intent intent){
Intent i = new Intent(context,AutoUpdateService.class);
context.startService(i);
}<|fim▁hole|><|fim▁end|>
|
}
|
<|file_name|>serialization_profile.py<|end_file_name|><|fim▁begin|>from __future__ import print_function
import shutil
import os.path
import tempfile
import cProfile
import pstats
import nineml
from nineml.utils.comprehensive_example import (
instances_of_all_types, v1_safe_docs)
from nineml.serialization import ext_to_format, format_to_serializer
format_to_ext = dict((v, k) for k, v in ext_to_format.items()) # @UndefinedVariable @IgnorePep8
print_serialized = False
printable = ('xml', 'json', 'yaml')
_tmp_dir = tempfile.mkdtemp()
def function():<|fim▁hole|> for version in (1.0, 2.0):
if version == 1.0:
docs = v1_safe_docs
else:
docs = list(instances_of_all_types['NineML'].values())
for format in format_to_serializer: # @ReservedAssignment
try:
ext = format_to_ext[format]
except KeyError:
continue # ones that can't be written to file (e.g. dict)
for i, document in enumerate(docs):
doc = document.clone()
url = os.path.join(
_tmp_dir, 'test{}v{}{}'.format(i, version, ext))
nineml.write(url, doc, format=format, version=version,
indent=2)
if print_serialized and format in printable:
with open(url) as f:
print(f.read())
reread_doc = nineml.read(url, reload=True) # @UnusedVariable
shutil.rmtree(_tmp_dir)
out_file = os.path.join(os.getcwd(), 'serial_profile.out')
cProfile.run('function()', out_file)
p = pstats.Stats(out_file)
p.sort_stats('cumtime').print_stats()<|fim▁end|>
| |
<|file_name|>WebSecurityConfigurerAdapterTests.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2002-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.config.annotation.web;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationListener;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.annotation.AnnotationAwareOrderComparator;
import org.springframework.core.annotation.Order;
import org.springframework.security.authentication.AuthenticationTrustResolver;
import org.springframework.security.authentication.event.AuthenticationSuccessEvent;
import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.config.test.SpringTestRule;
import org.springframework.security.core.userdetails.PasswordEncodedUser;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;
import org.springframework.security.web.context.request.async.SecurityContextCallableProcessingInterceptor;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.web.accept.ContentNegotiationStrategy;
import org.springframework.web.accept.HeaderContentNegotiationStrategy;
import org.springframework.web.context.request.async.CallableProcessingInterceptor;
import org.springframework.web.context.request.async.WebAsyncManager;
import org.springframework.web.context.request.async.WebAsyncUtils;
import org.springframework.web.filter.OncePerRequestFilter;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.ThrowableAssert.catchThrowable;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestBuilders.formLogin;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.header;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
/**
* Tests for {@link WebSecurityConfigurerAdapter}.
*
* @author Rob Winch
* @author Joe Grandja
*/
@PrepareForTest({WebAsyncManager.class})
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({ "org.w3c.dom.*", "org.xml.sax.*", "org.apache.xerces.*", "javax.xml.parsers.*", "javax.xml.transform.*" })
public class WebSecurityConfigurerAdapterTests {
@Rule
public final SpringTestRule spring = new SpringTestRule();
@Autowired
private MockMvc mockMvc;
@Test
public void loadConfigWhenRequestSecureThenDefaultSecurityHeadersReturned() throws Exception {
this.spring.register(HeadersArePopulatedByDefaultConfig.class).autowire();
this.mockMvc.perform(get("/").secure(true))
.andExpect(header().string("X-Content-Type-Options", "nosniff"))
.andExpect(header().string("X-Frame-Options", "DENY"))
.andExpect(header().string("Strict-Transport-Security", "max-age=31536000 ; includeSubDomains"))
.andExpect(header().string("Cache-Control", "no-cache, no-store, max-age=0, must-revalidate"))
.andExpect(header().string("Pragma", "no-cache"))
.andExpect(header().string("Expires", "0"))
.andExpect(header().string("X-XSS-Protection", "1; mode=block"));
}
@EnableWebSecurity
static class HeadersArePopulatedByDefaultConfig extends WebSecurityConfigurerAdapter {
@Override
protected void configure(AuthenticationManagerBuilder auth) throws Exception {
auth
.inMemoryAuthentication()
.withUser(PasswordEncodedUser.user());
}
@Override
protected void configure(HttpSecurity http) throws Exception {
}
}
@Test
public void loadConfigWhenDefaultConfigThenWebAsyncManagerIntegrationFilterAdded() throws Exception {
this.spring.register(WebAsyncPopulatedByDefaultConfig.class).autowire();
WebAsyncManager webAsyncManager = mock(WebAsyncManager.class);
this.mockMvc.perform(get("/").requestAttr(WebAsyncUtils.WEB_ASYNC_MANAGER_ATTRIBUTE, webAsyncManager));
ArgumentCaptor<CallableProcessingInterceptor> callableProcessingInterceptorArgCaptor =
ArgumentCaptor.forClass(CallableProcessingInterceptor.class);
verify(webAsyncManager, atLeastOnce()).registerCallableInterceptor(any(), callableProcessingInterceptorArgCaptor.capture());
CallableProcessingInterceptor callableProcessingInterceptor =
callableProcessingInterceptorArgCaptor.getAllValues().stream()
.filter(e -> SecurityContextCallableProcessingInterceptor.class.isAssignableFrom(e.getClass()))
.findFirst()
.orElse(null);
assertThat(callableProcessingInterceptor).isNotNull();
}
@EnableWebSecurity
static class WebAsyncPopulatedByDefaultConfig extends WebSecurityConfigurerAdapter {
@Override
protected void configure(AuthenticationManagerBuilder auth) throws Exception {
auth
.inMemoryAuthentication()
.withUser(PasswordEncodedUser.user());
}
@Override
protected void configure(HttpSecurity http) throws Exception {
}
}
@Test
public void loadConfigWhenRequestAuthenticateThenAuthenticationEventPublished() throws Exception {
this.spring.register(InMemoryAuthWithWebSecurityConfigurerAdapter.class).autowire();
this.mockMvc.perform(formLogin())
.andExpect(status().is3xxRedirection());
assertThat(InMemoryAuthWithWebSecurityConfigurerAdapter.EVENTS).isNotEmpty();
assertThat(InMemoryAuthWithWebSecurityConfigurerAdapter.EVENTS).hasSize(1);
}
@EnableWebSecurity
static class InMemoryAuthWithWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter
implements ApplicationListener<AuthenticationSuccessEvent> {
static List<AuthenticationSuccessEvent> EVENTS = new ArrayList<>();
@Override
protected void configure(AuthenticationManagerBuilder auth) throws Exception {
auth
.inMemoryAuthentication()
.withUser(PasswordEncodedUser.user());
}
@Override
public void onApplicationEvent(AuthenticationSuccessEvent event) {
EVENTS.add(event);
}
}
@Test
public void loadConfigWhenInMemoryConfigureProtectedThenPasswordUpgraded() throws Exception {
this.spring.register(InMemoryConfigureProtectedConfig.class).autowire();
this.mockMvc.perform(formLogin())
.andExpect(status().is3xxRedirection());
UserDetailsService uds = this.spring.getContext()
.getBean(UserDetailsService.class);
assertThat(uds.loadUserByUsername("user").getPassword()).startsWith("{bcrypt}");
}
@EnableWebSecurity
static class InMemoryConfigureProtectedConfig extends WebSecurityConfigurerAdapter {
@Override
protected void configure(AuthenticationManagerBuilder auth) throws Exception {
auth
.inMemoryAuthentication()
.withUser(PasswordEncodedUser.user());
}
@Override
@Bean
public UserDetailsService userDetailsServiceBean() throws Exception {
return super.userDetailsServiceBean();
}
}
@Test
public void loadConfigWhenInMemoryConfigureGlobalThenPasswordUpgraded() throws Exception {
this.spring.register(InMemoryConfigureGlobalConfig.class).autowire();
this.mockMvc.perform(formLogin())
.andExpect(status().is3xxRedirection());
UserDetailsService uds = this.spring.getContext()
.getBean(UserDetailsService.class);
assertThat(uds.loadUserByUsername("user").getPassword()).startsWith("{bcrypt}");
}
@EnableWebSecurity
static class InMemoryConfigureGlobalConfig extends WebSecurityConfigurerAdapter {
@Autowired
public void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {
auth
.inMemoryAuthentication()
.withUser(PasswordEncodedUser.user());
}
@Override
@Bean
public UserDetailsService userDetailsServiceBean() throws Exception {
return super.userDetailsServiceBean();
}
}
@Test
public void loadConfigWhenCustomContentNegotiationStrategyBeanThenOverridesDefault() throws Exception {
OverrideContentNegotiationStrategySharedObjectConfig.CONTENT_NEGOTIATION_STRATEGY_BEAN = mock(ContentNegotiationStrategy.class);
this.spring.register(OverrideContentNegotiationStrategySharedObjectConfig.class).autowire();
OverrideContentNegotiationStrategySharedObjectConfig securityConfig =
this.spring.getContext().getBean(OverrideContentNegotiationStrategySharedObjectConfig.class);
assertThat(securityConfig.contentNegotiationStrategySharedObject).isNotNull();
assertThat(securityConfig.contentNegotiationStrategySharedObject)
.isSameAs(OverrideContentNegotiationStrategySharedObjectConfig.CONTENT_NEGOTIATION_STRATEGY_BEAN);
}
@EnableWebSecurity
static class OverrideContentNegotiationStrategySharedObjectConfig extends WebSecurityConfigurerAdapter {
static ContentNegotiationStrategy CONTENT_NEGOTIATION_STRATEGY_BEAN;
private ContentNegotiationStrategy contentNegotiationStrategySharedObject;
@Bean
public ContentNegotiationStrategy contentNegotiationStrategy() {
return CONTENT_NEGOTIATION_STRATEGY_BEAN;
}
@Override
protected void configure(HttpSecurity http) throws Exception {
this.contentNegotiationStrategySharedObject = http.getSharedObject(ContentNegotiationStrategy.class);
super.configure(http);
}
}
@Test
public void loadConfigWhenDefaultContentNegotiationStrategyThenHeaderContentNegotiationStrategy() throws Exception {
this.spring.register(ContentNegotiationStrategyDefaultSharedObjectConfig.class).autowire();
ContentNegotiationStrategyDefaultSharedObjectConfig securityConfig =
this.spring.getContext().getBean(ContentNegotiationStrategyDefaultSharedObjectConfig.class);
assertThat(securityConfig.contentNegotiationStrategySharedObject).isNotNull();
assertThat(securityConfig.contentNegotiationStrategySharedObject).isInstanceOf(HeaderContentNegotiationStrategy.class);
}
@EnableWebSecurity
static class ContentNegotiationStrategyDefaultSharedObjectConfig extends WebSecurityConfigurerAdapter {
private ContentNegotiationStrategy contentNegotiationStrategySharedObject;
@Override
protected void configure(HttpSecurity http) throws Exception {
this.contentNegotiationStrategySharedObject = http.getSharedObject(ContentNegotiationStrategy.class);
super.configure(http);
}
}
@Test
public void loadConfigWhenUserDetailsServiceHasCircularReferenceThenStillLoads() throws Exception {
this.spring.register(RequiresUserDetailsServiceConfig.class, UserDetailsServiceConfig.class).autowire();
MyFilter myFilter = this.spring.getContext().getBean(MyFilter.class);
		Throwable thrown = catchThrowable(() -> myFilter.userDetailsService.loadUserByUsername("user"));
assertThat(thrown).isNull();
		thrown = catchThrowable(() -> myFilter.userDetailsService.loadUserByUsername("admin"));
assertThat(thrown).isInstanceOf(UsernameNotFoundException.class);
}
@Configuration
static class RequiresUserDetailsServiceConfig {
@Bean
public MyFilter myFilter(UserDetailsService userDetailsService) {
return new MyFilter(userDetailsService);
}
}
@EnableWebSecurity
static class UserDetailsServiceConfig extends WebSecurityConfigurerAdapter {
@Autowired
private MyFilter myFilter;
@Bean
@Override
public UserDetailsService userDetailsServiceBean() throws Exception {
return super.userDetailsServiceBean();
}
@Override
public void configure(HttpSecurity http) {
http.addFilterBefore(this.myFilter, UsernamePasswordAuthenticationFilter.class);
}
@Override
protected void configure(AuthenticationManagerBuilder auth) throws Exception {
auth
.inMemoryAuthentication()
.withUser(PasswordEncodedUser.user());
}
}
static class MyFilter extends OncePerRequestFilter {
private UserDetailsService userDetailsService;
MyFilter(UserDetailsService userDetailsService) {
this.userDetailsService = userDetailsService;
}
@Override
protected void doFilterInternal(HttpServletRequest request,
HttpServletResponse response,
FilterChain filterChain) throws ServletException, IOException {<|fim▁hole|> }
}
// SEC-2274: WebSecurityConfigurer adds ApplicationContext as a shared object
@Test
public void loadConfigWhenSharedObjectsCreatedThenApplicationContextAdded() throws Exception {
this.spring.register(ApplicationContextSharedObjectConfig.class).autowire();
ApplicationContextSharedObjectConfig securityConfig =
this.spring.getContext().getBean(ApplicationContextSharedObjectConfig.class);
assertThat(securityConfig.applicationContextSharedObject).isNotNull();
assertThat(securityConfig.applicationContextSharedObject).isSameAs(this.spring.getContext());
}
@EnableWebSecurity
static class ApplicationContextSharedObjectConfig extends WebSecurityConfigurerAdapter {
private ApplicationContext applicationContextSharedObject;
@Override
protected void configure(HttpSecurity http) throws Exception {
this.applicationContextSharedObject = http.getSharedObject(ApplicationContext.class);
super.configure(http);
}
}
@Test
public void loadConfigWhenCustomAuthenticationTrustResolverBeanThenOverridesDefault() throws Exception {
CustomTrustResolverConfig.AUTHENTICATION_TRUST_RESOLVER_BEAN = mock(AuthenticationTrustResolver.class);
this.spring.register(CustomTrustResolverConfig.class).autowire();
CustomTrustResolverConfig securityConfig =
this.spring.getContext().getBean(CustomTrustResolverConfig.class);
assertThat(securityConfig.authenticationTrustResolverSharedObject).isNotNull();
assertThat(securityConfig.authenticationTrustResolverSharedObject)
.isSameAs(CustomTrustResolverConfig.AUTHENTICATION_TRUST_RESOLVER_BEAN);
}
@EnableWebSecurity
static class CustomTrustResolverConfig extends WebSecurityConfigurerAdapter {
static AuthenticationTrustResolver AUTHENTICATION_TRUST_RESOLVER_BEAN;
private AuthenticationTrustResolver authenticationTrustResolverSharedObject;
@Bean
public AuthenticationTrustResolver authenticationTrustResolver() {
return AUTHENTICATION_TRUST_RESOLVER_BEAN;
}
@Override
protected void configure(HttpSecurity http) throws Exception {
this.authenticationTrustResolverSharedObject = http.getSharedObject(AuthenticationTrustResolver.class);
super.configure(http);
}
}
@Test
public void compareOrderWebSecurityConfigurerAdapterWhenLowestOrderToDefaultOrderThenGreaterThanZero() throws Exception {
AnnotationAwareOrderComparator comparator = new AnnotationAwareOrderComparator();
assertThat(comparator.compare(
new LowestPriorityWebSecurityConfig(),
new DefaultOrderWebSecurityConfig())).isGreaterThan(0);
}
static class DefaultOrderWebSecurityConfig extends WebSecurityConfigurerAdapter {
}
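	// A bare @Order (no value) means Ordered.LOWEST_PRECEDENCE, so the comparator
	// sorts the annotated config after the adapter's default order of 100.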
@Order
static class LowestPriorityWebSecurityConfig extends WebSecurityConfigurerAdapter {
}
}<|fim▁end|>
|
filterChain.doFilter(request, response);
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>pub struct Allergies(u32);
impl Allergies {
pub fn new(x: u32) -> Allergies {
Allergies(x)
}
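    // Allergen scores are bit flags: each variant's value below is a distinct
    // power of two, so a bitwise AND against the stored score tests membership.
    // For example, a score of 3 (0b11) covers Eggs (1) and Peanuts (2).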
pub fn is_allergic_to(&self, allergen: &Allergen) -> bool {
allergen.value() & self.0 != 0
}
pub fn allergies(&self) -> Vec<Allergen> {
Allergen::all()<|fim▁hole|> .into_iter()
.filter(|ref a| self.is_allergic_to(&a))
.collect()
}
}
macro_rules! allergens {
{ $( $name:ident => $order:expr ),* }
=> {
#[derive(Debug, PartialEq)]
pub enum Allergen {
$( $name ),*
}
impl Allergen {
fn value(&self) -> u32 {
match *self {
$( Allergen::$name => $order ),*
}
}
fn all() -> Vec<Allergen> {
vec![ $( Allergen::$name ),* ]
}
}
}
}
allergens!{
Eggs => 1,
Peanuts => 2,
Shellfish => 4,
Strawberries => 8,
Tomatoes => 16,
Chocolate => 32,
Pollen => 64,
Cats => 128
}<|fim▁end|>
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use std::fmt;
use std::error::Error;
use std::sync::Mutex;
use CapabilitiesSource;
use gl;
use version::Api;
use version::Version;
pub use self::compute::{ComputeShader, ComputeCommand};
pub use self::program::Program;
pub use self::reflection::{Uniform, UniformBlock, BlockLayout, OutputPrimitives};
pub use self::reflection::{Attribute, TransformFeedbackVarying, TransformFeedbackBuffer, TransformFeedbackMode};
mod compute;
mod program;
mod raw;
mod reflection;
mod shader;
mod uniforms_storage;
/// Returns true if the backend supports geometry shaders.
#[inline]
pub fn is_geometry_shader_supported<C>(ctxt: &C) -> bool where C: CapabilitiesSource {
shader::check_shader_type_compatibility(ctxt, gl::GEOMETRY_SHADER)
}
/// Returns true if the backend supports tessellation shaders.
#[inline]
pub fn is_tessellation_shader_supported<C>(ctxt: &C) -> bool where C: CapabilitiesSource {
shader::check_shader_type_compatibility(ctxt, gl::TESS_CONTROL_SHADER)
}
/// Returns true if the backend supports creating and retrieving binary formats.
#[inline]
pub fn is_binary_supported<C>(ctxt: &C) -> bool where C: CapabilitiesSource {
ctxt.get_version() >= &Version(Api::Gl, 4, 1) || ctxt.get_version() >= &Version(Api::GlEs, 2, 0)
        || ctxt.get_extensions().gl_arb_get_program_binary
}
/// Some shader compilers have race-condition issues, so we lock this mutex
/// in the GL thread every time we compile a shader or link a program.
// TODO: replace by a StaticMutex
lazy_static! {
static ref COMPILER_GLOBAL_LOCK: Mutex<()> = Mutex::new(());
}
/// Error that can be triggered when creating a `Program`.
#[derive(Clone, Debug)]
pub enum ProgramCreationError {
/// Error while compiling one of the shaders.
CompilationError(String),
/// Error while linking the program.
LinkingError(String),
/// One of the requested shader types is not supported by the backend.
///
/// Usually the case for geometry shaders.
ShaderTypeNotSupported,
/// The OpenGL implementation doesn't provide a compiler.<|fim▁hole|>
/// You have requested transform feedback varyings, but transform feedback is not supported
/// by the backend.
TransformFeedbackNotSupported,
/// You have requested point size setting from the shader, but it's not
/// supported by the backend.
PointSizeNotSupported,
}
impl fmt::Display for ProgramCreationError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
&ProgramCreationError::CompilationError(ref s) =>
formatter.write_fmt(format_args!("Compilation error in one of the shaders: {}", s)),
&ProgramCreationError::LinkingError(ref s) =>
formatter.write_fmt(format_args!("Error while linking shaders together: {}", s)),
&ProgramCreationError::ShaderTypeNotSupported =>
formatter.write_str("One of the request shader type is \
not supported by the backend"),
&ProgramCreationError::CompilationNotSupported =>
formatter.write_str("The backend doesn't support shaders compilation"),
&ProgramCreationError::TransformFeedbackNotSupported =>
formatter.write_str("You requested transform feedback, but this feature is not \
supported by the backend"),
&ProgramCreationError::PointSizeNotSupported =>
formatter.write_str("You requested point size setting, but it's not \
supported by the backend"),
}
}
}
impl Error for ProgramCreationError {
fn description(&self) -> &str {
match self {
&ProgramCreationError::CompilationError(_) => "Compilation error in one of the \
shaders",
&ProgramCreationError::LinkingError(_) => "Error while linking shaders together",
&ProgramCreationError::ShaderTypeNotSupported => "One of the request shader type is \
not supported by the backend",
&ProgramCreationError::CompilationNotSupported => "The backend doesn't support \
shaders compilation",
&ProgramCreationError::TransformFeedbackNotSupported => "Transform feedback is not \
supported by the backend.",
&ProgramCreationError::PointSizeNotSupported => "Point size is not supported by \
the backend.",
}
}
#[inline]
fn cause(&self) -> Option<&Error> {
None
}
}
/// Error while retrieving the binary representation of a program.
#[derive(Copy, Clone, Debug)]
pub enum GetBinaryError {
/// The backend doesn't support binary.
NotSupported,
}
/// Input when creating a program.
pub enum ProgramCreationInput<'a> {
/// Use GLSL source code.
SourceCode {
/// Source code of the vertex shader.
vertex_shader: &'a str,
/// Source code of the optional tessellation control shader.
tessellation_control_shader: Option<&'a str>,
/// Source code of the optional tessellation evaluation shader.
tessellation_evaluation_shader: Option<&'a str>,
/// Source code of the optional geometry shader.
geometry_shader: Option<&'a str>,
/// Source code of the fragment shader.
fragment_shader: &'a str,
/// The list of variables and mode to use for transform feedback.
///
/// The information specified here will be passed to the OpenGL linker. If you pass
/// `None`, then you won't be able to use transform feedback.
transform_feedback_varyings: Option<(Vec<String>, TransformFeedbackMode)>,
/// Whether the fragment shader outputs colors in `sRGB` or `RGB`. This is false by default,
/// meaning that the program outputs `RGB`.
///
/// If this is false, then `GL_FRAMEBUFFER_SRGB` will be enabled when this program is used
/// (if it is supported).
outputs_srgb: bool,
/// Whether the shader uses point size.
uses_point_size: bool,
},
/// Use a precompiled binary.
Binary {
/// The data.
data: Binary,
/// See `SourceCode::outputs_srgb`.
outputs_srgb: bool,
/// Whether the shader uses point size.
uses_point_size: bool,
}
}
/// Represents the source code of a program.
pub struct SourceCode<'a> {
/// Source code of the vertex shader.
pub vertex_shader: &'a str,
/// Source code of the optional tessellation control shader.
pub tessellation_control_shader: Option<&'a str>,
/// Source code of the optional tessellation evaluation shader.
pub tessellation_evaluation_shader: Option<&'a str>,
/// Source code of the optional geometry shader.
pub geometry_shader: Option<&'a str>,
/// Source code of the fragment shader.
pub fragment_shader: &'a str,
}
impl<'a> From<SourceCode<'a>> for ProgramCreationInput<'a> {
#[inline]
fn from(code: SourceCode<'a>) -> ProgramCreationInput<'a> {
let SourceCode { vertex_shader, fragment_shader, geometry_shader,
tessellation_control_shader, tessellation_evaluation_shader } = code;
ProgramCreationInput::SourceCode {
vertex_shader: vertex_shader,
tessellation_control_shader: tessellation_control_shader,
tessellation_evaluation_shader: tessellation_evaluation_shader,
geometry_shader: geometry_shader,
fragment_shader: fragment_shader,
transform_feedback_varyings: None,
outputs_srgb: false,
uses_point_size: false,
}
}
}
/// Represents the compiled binary data of a program.
pub struct Binary {
/// An implementation-defined format.
pub format: u32,
/// The binary data.
pub content: Vec<u8>,
}
impl<'a> From<Binary> for ProgramCreationInput<'a> {
#[inline]
fn from(binary: Binary) -> ProgramCreationInput<'a> {
ProgramCreationInput::Binary {
data: binary,
outputs_srgb: false,
uses_point_size: false,
}
}
}<|fim▁end|>
|
CompilationNotSupported,
|
<|file_name|>mouseevent.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::MouseEventBinding;
use dom::bindings::codegen::Bindings::MouseEventBinding::MouseEventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventCast, MouseEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, MutNullableHeap, Root, RootedReference};
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use dom::eventtarget::EventTarget;
use dom::uievent::UIEvent;
use dom::window::Window;
use util::opts;
use util::str::DOMString;
use std::cell::Cell;
use std::default::Default;
#[dom_struct]
pub struct MouseEvent {
uievent: UIEvent,
screen_x: Cell<i32>,
screen_y: Cell<i32>,
client_x: Cell<i32>,
client_y: Cell<i32>,
ctrl_key: Cell<bool>,
shift_key: Cell<bool>,
alt_key: Cell<bool>,
meta_key: Cell<bool>,
button: Cell<i16>,
related_target: MutNullableHeap<JS<EventTarget>>,
}
impl MouseEventDerived for Event {
fn is_mouseevent(&self) -> bool {
*self.type_id() == EventTypeId::MouseEvent
}<|fim▁hole|>}
impl MouseEvent {
fn new_inherited() -> MouseEvent {
MouseEvent {
uievent: UIEvent::new_inherited(EventTypeId::MouseEvent),
screen_x: Cell::new(0),
screen_y: Cell::new(0),
client_x: Cell::new(0),
client_y: Cell::new(0),
ctrl_key: Cell::new(false),
shift_key: Cell::new(false),
alt_key: Cell::new(false),
meta_key: Cell::new(false),
button: Cell::new(0),
related_target: Default::default(),
}
}
pub fn new_uninitialized(window: &Window) -> Root<MouseEvent> {
reflect_dom_object(box MouseEvent::new_inherited(),
GlobalRef::Window(window),
MouseEventBinding::Wrap)
}
pub fn new(window: &Window,
type_: DOMString,
canBubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
screenX: i32,
screenY: i32,
clientX: i32,
clientY: i32,
ctrlKey: bool,
altKey: bool,
shiftKey: bool,
metaKey: bool,
button: i16,
relatedTarget: Option<&EventTarget>) -> Root<MouseEvent> {
let ev = MouseEvent::new_uninitialized(window);
ev.r().InitMouseEvent(type_, canBubble == EventBubbles::Bubbles, cancelable == EventCancelable::Cancelable,
view, detail,
screenX, screenY, clientX, clientY,
ctrlKey, altKey, shiftKey, metaKey,
button, relatedTarget);
ev
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &MouseEventBinding::MouseEventInit) -> Fallible<Root<MouseEvent>> {
let bubbles = if init.parent.parent.parent.bubbles {
EventBubbles::Bubbles
} else {
EventBubbles::DoesNotBubble
};
let cancelable = if init.parent.parent.parent.cancelable {
EventCancelable::Cancelable
} else {
EventCancelable::NotCancelable
};
let event = MouseEvent::new(global.as_window(), type_,
bubbles,
cancelable,
init.parent.parent.view.r(),
init.parent.parent.detail,
init.screenX, init.screenY,
init.clientX, init.clientY, init.parent.ctrlKey,
init.parent.altKey, init.parent.shiftKey, init.parent.metaKey,
init.button, init.relatedTarget.r());
Ok(event)
}
}
impl<'a> MouseEventMethods for &'a MouseEvent {
// https://w3c.github.io/uievents/#widl-MouseEvent-screenX
fn ScreenX(self) -> i32 {
self.screen_x.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-screenY
fn ScreenY(self) -> i32 {
self.screen_y.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-clientX
fn ClientX(self) -> i32 {
self.client_x.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-clientY
fn ClientY(self) -> i32 {
self.client_y.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-ctrlKey
fn CtrlKey(self) -> bool {
self.ctrl_key.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-shiftKey
fn ShiftKey(self) -> bool {
self.shift_key.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-altKey
fn AltKey(self) -> bool {
self.alt_key.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-metaKey
fn MetaKey(self) -> bool {
self.meta_key.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-button
fn Button(self) -> i16 {
self.button.get()
}
// https://w3c.github.io/uievents/#widl-MouseEvent-relatedTarget
fn GetRelatedTarget(self) -> Option<Root<EventTarget>> {
self.related_target.get().map(Root::from_rooted)
}
// See discussion at:
// - https://github.com/servo/servo/issues/6643
// - https://bugzilla.mozilla.org/show_bug.cgi?id=1186125
// This returns the same result as current gecko.
// https://developer.mozilla.org/en-US/docs/Web/API/MouseEvent/which
fn Which(self) -> i32 {
if opts::experimental_enabled() {
(self.button.get() + 1) as i32
} else {
0
}
}
// https://w3c.github.io/uievents/#widl-MouseEvent-initMouseEvent
fn InitMouseEvent(self,
typeArg: DOMString,
canBubbleArg: bool,
cancelableArg: bool,
viewArg: Option<&Window>,
detailArg: i32,
screenXArg: i32,
screenYArg: i32,
clientXArg: i32,
clientYArg: i32,
ctrlKeyArg: bool,
altKeyArg: bool,
shiftKeyArg: bool,
metaKeyArg: bool,
buttonArg: i16,
relatedTargetArg: Option<&EventTarget>) {
let event: &Event = EventCast::from_ref(self);
if event.dispatching() {
return;
}
let uievent: &UIEvent = UIEventCast::from_ref(self);
uievent.InitUIEvent(typeArg, canBubbleArg, cancelableArg, viewArg, detailArg);
self.screen_x.set(screenXArg);
self.screen_y.set(screenYArg);
self.client_x.set(clientXArg);
self.client_y.set(clientYArg);
self.ctrl_key.set(ctrlKeyArg);
self.alt_key.set(altKeyArg);
self.shift_key.set(shiftKeyArg);
self.meta_key.set(metaKeyArg);
self.button.set(buttonArg);
self.related_target.set(relatedTargetArg.map(JS::from_ref));
}
}<|fim▁end|>
| |
<|file_name|>skeletonmm-tarball.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# External command, intended to be called with run_command() or custom_target()
# in meson.build
# argv[1] argv[2] argv[3:]
# skeletonmm-tarball.py <output_file_or_check> <source_dir> <input_files...>
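# Hypothetical invocations (paths and names are examples only):
#   skeletonmm-tarball.py check .                  -> prints ".tar.xz" (or the best available suffix)
#   skeletonmm-tarball.py skeletonmm.tar.xz . README src/skeleton.cc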
import os
import sys
import shutil
import tarfile<|fim▁hole|> # In order from most wanted to least wanted: .tar.xz, .tar.gz, .tar
available_archive_formats = []
for af in shutil.get_archive_formats():
# Keep the formats in a list, skip the descriptions.
available_archive_formats += [af[0]]
if 'xztar' in available_archive_formats:
suffix = '.tar.xz'
elif 'gztar' in available_archive_formats:
suffix = '.tar.gz'
else: # Uncompressed tar format is always available.
suffix = '.tar'
print(suffix, end='') # stdout can be read in the meson.build file.
sys.exit(0)
# Create an archive.
output_file = sys.argv[1]
source_dir = sys.argv[2]
if output_file.endswith('.xz'):
mode = 'w:xz'
elif output_file.endswith('.gz'):
mode = 'w:gz'
else:
mode = 'w'
with tarfile.open(output_file, mode=mode) as tar_file:
os.chdir(source_dir) # Input filenames are relative to source_dir.
for file in sys.argv[3:]:
tar_file.add(file)
# Errors raise exceptions. If an exception is raised, Meson+ninja will notice
# that the command failed, despite exit(0).
sys.exit(0)
# shutil.make_archive() might be an alternative, but it only archives
# whole directories. It's not useful if you want full control
# of which files are archived.<|fim▁end|>
|
if sys.argv[1] == 'check':
# Called from run_command() during setup or configuration.
# Check which archive format can be used.
|
<|file_name|>tensor_util.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
__all__ = [
'assert_same_float_dtype',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'with_shape',
'with_same_shape']
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
`dtype` is supplied, default to `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected float, got %s.' % dtype)
return dtype
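# Example (hypothetical tensors): assert_same_float_dtype([a, b]) returns
# tf.float32 when both tensors are float32, and raises ValueError if their
# base dtypes differ or are not floating point.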
def assert_scalar_int(tensor):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: Tensor to test.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
"""
data_type = tensor.dtype
if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
return tensor
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
    Total sum tensor, reduced to a scalar.
Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
with ops.name_scope(name, 'reduce_sum_n', tensors) as scope:
return math_ops.add_n(tensors, name=scope)
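# Example (hypothetical shapes): for a of shape [2, 3] and b of shape [4],
# reduce_sum_n([a, b]) returns a scalar equal to tf.reduce_sum(a) + tf.reduce_sum(b).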
def _all_equal(tensor0, tensor1):
with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
return math_ops.reduce_all(
math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
<|fim▁hole|>def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
Tuple of (actual_tensor, label_tensor), possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def is_tensor(x):
"""Check for tensor types.
Check whether an object is a tensor. Equivalent to
  `isinstance(x, (tf.Tensor, tf.SparseTensor, tf.Variable))`.
Args:
    x: A python object to check.
Returns:
`True` if `x` is a tensor, `False` if not.
"""
tensor_types = (ops.Tensor, ops.SparseTensor, variables.Variable)
return isinstance(x, tensor_types)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
If tensor shape and expected_shape, are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, ops.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if not actual_shape.is_fully_defined() or is_tensor(expected_shape):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if not is_tensor(expected_shape) and (len(expected_shape) < 1):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
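# Example (hypothetical): with_shape([2, 3], t) returns t unchanged when t's
# static shape is fully defined as [2, 3]; otherwise it attaches a runtime
# shape assert and sets the static shape on the returned tensor.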
def convert_to_tensor_or_sparse_tensor(
value, dtype=None, name=None, as_ref=False):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the result as a ref tensor. Only used if a new
`Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, ops.SparseTensorValue):
value = ops.SparseTensor.from_value(value)
if isinstance(value, ops.SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError(
'Sparse dtype: requested = %s, actual = %s' % (
dtype.name, value.dtype.name))
return value
return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)<|fim▁end|>
| |
<|file_name|>errors.rs<|end_file_name|><|fim▁begin|>use log::error;
use std::fmt::Debug;
use thiserror::Error;
#[derive(Error, Debug)]<|fim▁hole|> #[error("Error")]
Error(String),
}
impl std::convert::From<std::io::Error> for FlushError {
fn from(e: std::io::Error) -> Self {
FlushError::Error(format!("{}", e))
}
}<|fim▁end|>
|
pub enum FlushError {
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*-coding:Utf-8 -*
# Copyright (c) 2012 NOEL-BARON Léo
# All rights reserved.<|fim▁hole|># * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'recettes' et ses sous-commandes.
Dans ce fichier se trouve la commande même.
"""
from primaires.interpreteur.commande.commande import Commande
from .editer import PrmEditer
from .lister import PrmLister
from .supprimer import PrmSupprimer
class CmdRecettes(Commande):
"""Commande 'recettes'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "recettes", "recipes")
self.groupe = "administrateur"
self.aide_courte = "manipulation des recettes"
self.aide_longue = \
""
def ajouter_parametres(self):
"""Ajout des paramètres"""
self.ajouter_parametre(PrmEditer())
self.ajouter_parametre(PrmLister())
self.ajouter_parametre(PrmSupprimer())<|fim▁end|>
|
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
|
<|file_name|>objects.rs<|end_file_name|><|fim▁begin|>// Copyright 2019 Dmitry Tantsur <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Stored objects.
use std::collections::HashMap;
use std::io::Read;
use std::rc::Rc;
use chrono::{DateTime, TimeZone};
use fallible_iterator::{FallibleIterator, IntoFallibleIterator};
use osauth::services::OBJECT_STORAGE;
use reqwest::Url;
use super::super::common::{
ContainerRef, IntoVerified, ObjectRef, Refresh, ResourceIterator, ResourceQuery,
};
use super::super::session::Session;
use super::super::utils::Query;
use super::super::{Error, Result};
use super::{api, protocol};
/// A query to objects.
#[derive(Clone, Debug)]
pub struct ObjectQuery {
session: Rc<Session>,
c_name: String,
query: Query,
can_paginate: bool,
}
/// A request to create an object.
#[derive(Debug)]
pub struct NewObject<R> {
session: Rc<Session>,
c_name: ContainerRef,
name: String,
body: R,
headers: ObjectHeaders,
}
/// Optional headers for an object.
#[derive(Debug, Default)]
pub struct ObjectHeaders {
pub delete_after: Option<u32>,
pub delete_at: Option<i64>,
pub metadata: HashMap<String, String>,
}
/// Structure representing an object.
#[derive(Clone, Debug)]
pub struct Object {
session: Rc<Session>,
inner: protocol::Object,
c_name: String,
}
impl Object {
/// Create a new Object object.
pub(crate) fn new(session: Rc<Session>, inner: protocol::Object, c_name: String) -> Object {
Object {
session,
inner,
c_name,
}
}
/// Create an object.
pub(crate) fn create<C, Id, R>(
session: Rc<Session>,
container: C,
name: Id,
body: R,
) -> Result<Object>
where
C: Into<ContainerRef>,
Id: AsRef<str>,
R: Read + Sync + Send + 'static,
{
let new_object = NewObject::new(
session,
container.into(),
// TODO(dtantsur): get rid of to_string here.
name.as_ref().to_string(),
body,
);
new_object.create()
}
/// Load an Object.
pub(crate) fn load<C, Id>(session: Rc<Session>, container: C, name: Id) -> Result<Object>
where
C: Into<ContainerRef>,
Id: AsRef<str>,
{
let c_ref = container.into();
let c_name = c_ref.to_string();
let inner = api::get_object(&session, c_ref, name)?;
Ok(Object::new(session, inner, c_name))
}
/// Delete the object.
#[inline]
pub fn delete(self) -> Result<()> {
api::delete_object(&self.session, &self.c_name, self.inner.name)
}
/// Download the object.
///
/// The object can be read from the resulting reader.
#[inline]
pub fn download(&self) -> Result<impl Read + '_> {
api::download_object(&self.session, &self.c_name, &self.inner.name)
}
transparent_property! {
#[doc = "Total size of the object."]
bytes: u64
}
/// Container name.
#[inline]
pub fn container_name(&self) -> &String {
&self.c_name
}
transparent_property! {
#[doc = "Object content type (if set)."]
content_type: ref Option<String>
}
transparent_property! {
#[doc = "Object hash or ETag, which is a content's md5 hash"]
hash: ref Option<String>
}
transparent_property! {
#[doc = "Object name."]
name: ref String
}
/// Object url.
#[inline]
pub fn url(&self) -> Result<Url> {
self.session
.get_endpoint(OBJECT_STORAGE, &[self.container_name(), self.name()])
}
}
impl Refresh for Object {
/// Refresh the object.
fn refresh(&mut self) -> Result<()> {
self.inner = api::get_object(&self.session, &self.c_name, &self.inner.name)?;
Ok(())
}
}
impl ObjectQuery {
pub(crate) fn new<C: Into<ContainerRef>>(session: Rc<Session>, container: C) -> ObjectQuery {
ObjectQuery {
session,
c_name: container.into().into(),
query: Query::new(),
can_paginate: true,
}
}
/// Add marker to the request.
///
/// Using this disables automatic pagination.
pub fn with_marker<T: Into<String>>(mut self, marker: T) -> Self {
self.can_paginate = false;
self.query.push_str("marker", marker);
self
}
/// Add limit to the request.
///
/// Using this disables automatic pagination.
pub fn with_limit(mut self, limit: usize) -> Self {
self.can_paginate = false;
self.query.push("limit", limit);
self
}
/// Convert this query into an iterator executing the request.
///
/// Returns a `FallibleIterator`, which is an iterator with each `next`
/// call returning a `Result`.
///
/// Note that no requests are done until you start iterating.
pub fn into_iter(self) -> ResourceIterator<ObjectQuery> {
debug!(
"Fetching objects in container {} with {:?}",
self.c_name, self.query
);
ResourceIterator::new(self)
}
/// Execute this request and return all results.
///
/// A convenience shortcut for `self.into_iter().collect()`.
pub fn all(self) -> Result<Vec<Object>> {
self.into_iter().collect()
}
<|fim▁hole|> pub fn one(mut self) -> Result<Object> {
debug!(
"Fetching one object in container {} with {:?}",
self.c_name, self.query
);
if self.can_paginate {
// We need only one result. We fetch maximum two to be able
        // to check if the query yielded more than one result.
self.query.push("limit", 2);
}
self.into_iter().one()
}
}
impl ResourceQuery for ObjectQuery {
type Item = Object;
const DEFAULT_LIMIT: usize = 100;
fn can_paginate(&self) -> Result<bool> {
Ok(self.can_paginate)
}
fn extract_marker(&self, resource: &Self::Item) -> String {
resource.name().clone()
}
fn fetch_chunk(&self, limit: Option<usize>, marker: Option<String>) -> Result<Vec<Self::Item>> {
let query = self.query.with_marker_and_limit(limit, marker);
Ok(api::list_objects(&self.session, &self.c_name, query)?
.into_iter()
.map(|item| Object {
session: self.session.clone(),
inner: item,
c_name: self.c_name.clone(),
})
.collect())
}
}
impl IntoFallibleIterator for ObjectQuery {
type Item = Object;
type Error = Error;
type IntoFallibleIter = ResourceIterator<ObjectQuery>;
fn into_fallible_iter(self) -> Self::IntoFallibleIter {
self.into_iter()
}
}
impl<R: Read + Sync + Send + 'static> NewObject<R> {
/// Start creating an object.
pub(crate) fn new(
session: Rc<Session>,
c_name: ContainerRef,
name: String,
body: R,
) -> NewObject<R> {
NewObject {
session,
c_name,
name,
body,
headers: ObjectHeaders::default(),
}
}
/// Request creation of the object.
pub fn create(self) -> Result<Object> {
let c_name = self.c_name.clone();
let inner = api::create_object(
&self.session,
self.c_name,
self.name,
self.body,
self.headers,
)?;
Ok(Object::new(self.session, inner, c_name.into()))
}
/// Metadata to set on the object.
#[inline]
pub fn metadata(&mut self) -> &mut HashMap<String, String> {
&mut self.headers.metadata
}
/// Set TTL in seconds for the object.
#[inline]
pub fn with_delete_after(mut self, ttl: u32) -> NewObject<R> {
self.headers.delete_after = Some(ttl);
self
}
/// Set the date and time when the object must be deleted.
#[inline]
pub fn with_delete_at<T: TimeZone>(mut self, datetime: DateTime<T>) -> NewObject<R> {
self.headers.delete_at = Some(datetime.timestamp());
self
}
/// Insert a new metadata item.
#[inline]
pub fn with_metadata<K, V>(mut self, key: K, item: V) -> NewObject<R>
where
K: Into<String>,
V: Into<String>,
{
let _ = self.headers.metadata.insert(key.into(), item.into());
self
}
}
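// Builder-style usage (hypothetical values): a `NewObject` can be tuned before
// the request is sent, e.g.
// `new_object.with_delete_after(3600).with_metadata("origin", "backup").create()`.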
impl From<Object> for ObjectRef {
fn from(value: Object) -> ObjectRef {
ObjectRef::new_verified(value.inner.name)
}
}
#[cfg(feature = "object-storage")]
impl IntoVerified for ObjectRef {}<|fim▁end|>
|
/// Return one and exactly one result.
///
/// Fails with `ResourceNotFound` if the query produces no results and
/// with `TooManyItems` if the query produces more than one result.
|
<|file_name|>pipe_unix.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use alloc::arc::Arc;
use libc;
use std::c_str::CString;
use std::mem;
use std::rt::mutex;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::sync::atomic;
use super::retry;
use super::net;
use super::util;
use super::c;
use super::process;
use super::file::{fd_t, FileDesc};
fn unix_socket(ty: libc::c_int) -> IoResult<fd_t> {
match unsafe { libc::socket(libc::AF_UNIX, ty, 0) } {
-1 => Err(super::last_error()),
fd => Ok(fd)
}
}
fn addr_to_sockaddr_un(addr: &CString,
storage: &mut libc::sockaddr_storage)
-> IoResult<libc::socklen_t> {
// the sun_path length is limited to SUN_LEN (with null)
assert!(mem::size_of::<libc::sockaddr_storage>() >=
mem::size_of::<libc::sockaddr_un>());
let s = unsafe { &mut *(storage as *mut _ as *mut libc::sockaddr_un) };
let len = addr.len();
if len > s.sun_path.len() - 1 {
#[cfg(unix)] use libc::EINVAL as ERROR;
#[cfg(windows)] use libc::WSAEINVAL as ERROR;
return Err(IoError {
code: ERROR as uint,
extra: 0,
detail: Some("path must be smaller than SUN_LEN".to_string()),
})
}
s.sun_family = libc::AF_UNIX as libc::sa_family_t;
for (slot, value) in s.sun_path.mut_iter().zip(addr.iter()) {
*slot = value;
}
// count the null terminator
let len = mem::size_of::<libc::sa_family_t>() + len + 1;
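    // The returned socklen covers the sa_family_t field plus the NUL-terminated
    // path, which is the length bind(2)/connect(2) expect for AF_UNIX addresses.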
return Ok(len as libc::socklen_t);
}
struct Inner {
fd: fd_t,
// Unused on Linux, where this lock is not necessary.
#[allow(dead_code)]
lock: mutex::NativeMutex
}
impl Inner {
fn new(fd: fd_t) -> Inner {
Inner { fd: fd, lock: unsafe { mutex::NativeMutex::new() } }
}
}
impl Drop for Inner {
fn drop(&mut self) { unsafe { let _ = libc::close(self.fd); } }
}
fn connect(addr: &CString, ty: libc::c_int,<|fim▁hole|> let mut storage = unsafe { mem::zeroed() };
let len = try!(addr_to_sockaddr_un(addr, &mut storage));
let inner = Inner::new(try!(unix_socket(ty)));
let addrp = &storage as *const _ as *const libc::sockaddr;
match timeout {
None => {
match retry(|| unsafe { libc::connect(inner.fd, addrp, len) }) {
-1 => Err(super::last_error()),
_ => Ok(inner)
}
}
Some(timeout_ms) => {
try!(util::connect_timeout(inner.fd, addrp, len, timeout_ms));
Ok(inner)
}
}
}
fn bind(addr: &CString, ty: libc::c_int) -> IoResult<Inner> {
let mut storage = unsafe { mem::zeroed() };
let len = try!(addr_to_sockaddr_un(addr, &mut storage));
let inner = Inner::new(try!(unix_socket(ty)));
let addrp = &storage as *const _ as *const libc::sockaddr;
match unsafe {
libc::bind(inner.fd, addrp, len)
} {
-1 => Err(super::last_error()),
_ => Ok(inner)
}
}
////////////////////////////////////////////////////////////////////////////////
// Unix Streams
////////////////////////////////////////////////////////////////////////////////
pub struct UnixStream {
inner: Arc<Inner>,
read_deadline: u64,
write_deadline: u64,
}
impl UnixStream {
pub fn connect(addr: &CString,
timeout: Option<u64>) -> IoResult<UnixStream> {
connect(addr, libc::SOCK_STREAM, timeout).map(|inner| {
UnixStream::new(Arc::new(inner))
})
}
fn new(inner: Arc<Inner>) -> UnixStream {
UnixStream {
inner: inner,
read_deadline: 0,
write_deadline: 0,
}
}
fn fd(&self) -> fd_t { self.inner.fd }
#[cfg(target_os = "linux")]
fn lock_nonblocking(&self) {}
#[cfg(not(target_os = "linux"))]
fn lock_nonblocking<'a>(&'a self) -> net::Guard<'a> {
let ret = net::Guard {
fd: self.fd(),
guard: unsafe { self.inner.lock.lock() },
};
assert!(util::set_nonblocking(self.fd(), true).is_ok());
ret
}
}
impl rtio::RtioPipe for UnixStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
let fd = self.fd();
let dolock = || self.lock_nonblocking();
let doread = |nb| unsafe {
let flags = if nb {c::MSG_DONTWAIT} else {0};
libc::recv(fd,
buf.as_mut_ptr() as *mut libc::c_void,
buf.len() as libc::size_t,
flags) as libc::c_int
};
net::read(fd, self.read_deadline, dolock, doread)
}
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
let fd = self.fd();
let dolock = || self.lock_nonblocking();
let dowrite = |nb: bool, buf: *const u8, len: uint| unsafe {
let flags = if nb {c::MSG_DONTWAIT} else {0};
libc::send(fd,
buf as *mut libc::c_void,
len as libc::size_t,
flags) as i64
};
match net::write(fd, self.write_deadline, buf, true, dolock, dowrite) {
Ok(_) => Ok(()),
Err(e) => Err(e)
}
}
fn clone(&self) -> Box<rtio::RtioPipe + Send> {
box UnixStream::new(self.inner.clone()) as Box<rtio::RtioPipe + Send>
}
fn close_write(&mut self) -> IoResult<()> {
super::mkerr_libc(unsafe { libc::shutdown(self.fd(), libc::SHUT_WR) })
}
fn close_read(&mut self) -> IoResult<()> {
super::mkerr_libc(unsafe { libc::shutdown(self.fd(), libc::SHUT_RD) })
}
fn set_timeout(&mut self, timeout: Option<u64>) {
let deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);
self.read_deadline = deadline;
self.write_deadline = deadline;
}
fn set_read_timeout(&mut self, timeout: Option<u64>) {
self.read_deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);
}
fn set_write_timeout(&mut self, timeout: Option<u64>) {
self.write_deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);
}
}
////////////////////////////////////////////////////////////////////////////////
// Unix Listener
////////////////////////////////////////////////////////////////////////////////
pub struct UnixListener {
inner: Inner,
path: CString,
}
impl UnixListener {
pub fn bind(addr: &CString) -> IoResult<UnixListener> {
bind(addr, libc::SOCK_STREAM).map(|fd| {
UnixListener { inner: fd, path: addr.clone() }
})
}
fn fd(&self) -> fd_t { self.inner.fd }
pub fn native_listen(self, backlog: int) -> IoResult<UnixAcceptor> {
match unsafe { libc::listen(self.fd(), backlog as libc::c_int) } {
-1 => Err(super::last_error()),
#[cfg(unix)]
_ => {
let (reader, writer) = try!(process::pipe());
try!(util::set_nonblocking(reader.fd(), true));
try!(util::set_nonblocking(writer.fd(), true));
try!(util::set_nonblocking(self.fd(), true));
Ok(UnixAcceptor {
inner: Arc::new(AcceptorInner {
listener: self,
reader: reader,
writer: writer,
closed: atomic::AtomicBool::new(false),
}),
deadline: 0,
})
}
}
}
}
impl rtio::RtioUnixListener for UnixListener {
fn listen(self: Box<UnixListener>)
-> IoResult<Box<rtio::RtioUnixAcceptor + Send>> {
self.native_listen(128).map(|a| {
box a as Box<rtio::RtioUnixAcceptor + Send>
})
}
}
pub struct UnixAcceptor {
inner: Arc<AcceptorInner>,
deadline: u64,
}
#[cfg(unix)]
struct AcceptorInner {
listener: UnixListener,
reader: FileDesc,
writer: FileDesc,
closed: atomic::AtomicBool,
}
impl UnixAcceptor {
fn fd(&self) -> fd_t { self.inner.listener.fd() }
pub fn native_accept(&mut self) -> IoResult<UnixStream> {
let deadline = if self.deadline == 0 {None} else {Some(self.deadline)};
while !self.inner.closed.load(atomic::SeqCst) {
unsafe {
let mut storage: libc::sockaddr_storage = mem::zeroed();
let storagep = &mut storage as *mut libc::sockaddr_storage;
let size = mem::size_of::<libc::sockaddr_storage>();
let mut size = size as libc::socklen_t;
match retry(|| {
libc::accept(self.fd(),
storagep as *mut libc::sockaddr,
&mut size as *mut libc::socklen_t) as libc::c_int
}) {
-1 if util::wouldblock() => {}
-1 => return Err(super::last_error()),
fd => return Ok(UnixStream::new(Arc::new(Inner::new(fd)))),
}
}
try!(util::await([self.fd(), self.inner.reader.fd()],
deadline, util::Readable));
}
Err(util::eof())
}
}
impl rtio::RtioUnixAcceptor for UnixAcceptor {
fn accept(&mut self) -> IoResult<Box<rtio::RtioPipe + Send>> {
self.native_accept().map(|s| box s as Box<rtio::RtioPipe + Send>)
}
fn set_timeout(&mut self, timeout: Option<u64>) {
self.deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);
}
fn clone(&self) -> Box<rtio::RtioUnixAcceptor + Send> {
box UnixAcceptor {
inner: self.inner.clone(),
deadline: 0,
} as Box<rtio::RtioUnixAcceptor + Send>
}
#[cfg(unix)]
fn close_accept(&mut self) -> IoResult<()> {
self.inner.closed.store(true, atomic::SeqCst);
let mut fd = FileDesc::new(self.inner.writer.fd(), false);
match fd.inner_write([0]) {
Ok(..) => Ok(()),
Err(..) if util::wouldblock() => Ok(()),
Err(e) => Err(e),
}
}
}
impl Drop for UnixListener {
fn drop(&mut self) {
// Unlink the path to the socket to ensure that it doesn't linger. We're
// careful to unlink the path before we close the file descriptor to
// prevent races where we unlink someone else's path.
unsafe {
let _ = libc::unlink(self.path.as_ptr());
}
}
}<|fim▁end|>
|
timeout: Option<u64>) -> IoResult<Inner> {
|
<|file_name|>address.js<|end_file_name|><|fim▁begin|>angular.module('ordercloud-address', [])
.directive('ordercloudAddressForm', AddressFormDirective)
.directive('ordercloudAddressInfo', AddressInfoDirective)
.filter('address', AddressFilter)
;
function AddressFormDirective(OCGeography) {
return {
restrict: 'E',
scope: {
address: '=',
isbilling: '='
},
templateUrl: 'common/address/templates/address.form.tpl.html',
link: function(scope) {
scope.countries = OCGeography.Countries;
scope.states = OCGeography.States;
}
};
}
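// Usage (hypothetical markup -- the element name follows Angular's
// normalization of the directive name):
//   <ordercloud-address-form address="vm.address" isbilling="true"></ordercloud-address-form>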
function AddressInfoDirective() {
return {
restrict: 'E',
scope: {
addressid: '@'
},
templateUrl: 'common/address/templates/address.info.tpl.html',
controller: 'AddressInfoCtrl',
controllerAs: 'addressInfo'
};
}
function AddressFilter() {
return function(address, option) {<|fim▁hole|> if (option === 'full') {
var result = [];
if (address.AddressName) {
result.push(address.AddressName);
}
result.push((address.FirstName ? address.FirstName + ' ' : '') + address.LastName);
result.push(address.Street1);
if (address.Street2) {
result.push(address.Street2);
}
result.push(address.City + ', ' + address.State + ' ' + address.Zip);
return result.join('\n');
}
else {
return address.Street1 + (address.Street2 ? ', ' + address.Street2 : '');
}
}
}<|fim▁end|>
|
if (!address) return null;
|
<|file_name|>NetzobRegex.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Georges Bossert <georges.bossert (a) supelec.fr> |
#| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
import uuid
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
from netzob.Common.Utils.TypedList import TypedList
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.Raw import Raw
from netzob.Common.Models.Types.HexaString import HexaString
@NetzobLogger
class NetzobRegex(object):
"""Represents a regex describing field boundaries. Static methods
can be used to build the regex. Don't use the constructor unless you really
know what you do. Instead we highly recommend the use of the static methods offered to
build different types of regex.
If still you want to use the constructor, don't specify the group since it will
be automaticaly added. For example, if your regex is (.*), only specify .* and forget the
() that will be added. In addition the constructor will also generate and add the group identifier. Your
regex will therefore look like : (P<f45689676567987628>.*).
"""
DEFAULT_REGEX = '.*'
def __init__(self):
self.id = 'f' + str(uuid.uuid4().hex)
self.regex = NetzobRegex.DEFAULT_REGEX
@property
def regex(self):
return self.__regex
@regex.setter
@typeCheck(str)
def regex(self, regex):
if regex is None:
raise TypeError("The regex cannot be None")
self.__regex = "(?P<{0}>{1})".format(self.id, regex)
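        # e.g. with id 'f3b2...' and regex '.*', the stored pattern becomes
        # '(?P<f3b2...>.*)': each NetzobRegex is wrapped in its own named
        # capture group so its match can later be retrieved by id.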
@property
def id(self):
return self.__id
@id.setter
@typeCheck(str)
def id(self, _id):
if _id is None:
raise TypeError("Id cannot be None")
if len(_id) == 0:
raise ValueError("Id must be defined (len>0)")
self.__id = _id
def __str__(self):
return str(self.regex)
def finalRegex(self):
"""This method returns the current regex with the starting and ending indicators
        added. For this reason, the returned regex can't be merged with other regexes.
:return: a string with the final regex definition including start and end indicators
:rtype: :class:`str`.
"""
return "^" + str(self) + "$"
@staticmethod
def buildDefaultRegex():
"""It creates the default regex which means
we have to knowledge over the format of the field.
>>> from netzob.all import *
>>> import regex as re
>>> data = "Hello netzob, a default regex grabs everything."
>>> hexData = TypeConverter.convert(data, ASCII, HexaString)
>>> nRegex = NetzobRegex.buildDefaultRegex()
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello netzob, a default regex grabs everything.
:return: a .* default NetzobRegex
:rtype: :class:`netzob.Common.Utils.NetzobRegex.NetzobRegex`
"""
regex = NetzobRegex()
regex.regex = '.*'
return regex
@staticmethod
def buildRegexForStaticValue(value):
"""It creates a NetzobRegex which represents
a regex with the specified Raw static value.
>>> from netzob.all import *
>>> import regex as re
>>> data = "Hello netzob"
>>> hexData = TypeConverter.convert(data, ASCII, HexaString)
>>> nRegex = NetzobRegex.buildRegexForStaticValue(data)
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello netzob
        :param value: the static value the regex must represent
        :type value: python raw (will be encoded in HexaString in the regex)
        :return: the regex which represents the specified value encoded in HexaString
        :rtype: :class:`netzob.Common.Utils.NetzobRegex.NetzobRegex`
"""
hexaStringValue = TypeConverter.convert(value, Raw, HexaString)
return NetzobStaticRegex(hexaStringValue)
@staticmethod
def buildRegexForEol():
"""It creates a NetzobRegex which represents an EOL
:return: the regex which represents an EOL
        :rtype: :class:`netzob.Common.Utils.NetzobRegex.NetzobRegex`
"""
return NetzobEolRegex()
@staticmethod
def buildRegexForSizedValue(size):
return NetzobSizedRegex(size)
@staticmethod
def buildRegexForAlternativeRegexes(regexes):
return NetzobAlternativeRegex(regexes)
@staticmethod
def buildRegexForAggregateRegexes(regexes):
return NetzobAggregateRegex(regexes)
@NetzobLogger
class NetzobSizedRegex(NetzobRegex):
"""Represents an aggregate regex.
>>> from netzob.Common.Utils.NetzobRegex import NetzobRegex
>>> from netzob.all import *
>>> import regex as re
>>> data = "Hello netzob"
>>> hexData = TypeConverter.convert(data, ASCII, HexaString)
>>> nRegex = NetzobRegex.buildRegexForSizedValue((8*4,8*5))
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello
>>> nRegex = NetzobRegex.buildRegexForSizedValue((None, None))
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello netzob
>>> nRegex = NetzobRegex.buildRegexForSizedValue((16, None))
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello netzob
>>> nRegex = NetzobRegex.buildRegexForSizedValue((None, 80))
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello netz
"""
def __init__(self, size):
super(NetzobSizedRegex, self).__init__()
self.size = size
def __updateRegex(self):
(minSize, maxSize) = self.size
if maxSize is None:
maxSize = ''
self.regex = ".{" + str(minSize) + "," + str(maxSize) + "}"
@property
def size(self):
return self.__size
@size.setter
def size(self, size):
(minSize, maxSize) = size
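        # Bounds are expressed in bits; one hexadecimal character encodes
        # 4 bits, so divide by 4 to get the bounds as counts of hex characters
        # in the underlying HexaString representation.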
if minSize is None:
minSize = 0
minSize = minSize / 4
if maxSize is not None:
maxSize = maxSize / 4
if minSize < 0 or maxSize < 0:
raise ValueError("The value min and max cannot be inferior to 0")
if maxSize < minSize:
raise ValueError("The max size must be superior to the min size")
self.__size = (minSize, maxSize)
self.__updateRegex()
@NetzobLogger
class NetzobAggregateRegex(NetzobRegex):
"""Represents an aggregate regex.
    Below is an example of such an aggregate regex with two aggregated regexes.
>>> from netzob.all import *
>>> import regex as re
>>> data = "Hello netzob, what's up ?"
>>> hexData = TypeConverter.convert(data, ASCII, HexaString)
>>> regex1 = NetzobRegex.buildRegexForStaticValue("Hello netzob")
>>> regex2 = NetzobRegex.buildRegexForStaticValue(", what's up ?")
>>> nRegex = NetzobRegex.buildRegexForAggregateRegexes([regex1, regex2])
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(regex1.id):dynamicDatas.end(regex1.id)], HexaString, ASCII)
Hello netzob
>>> print TypeConverter.convert(hexData[dynamicDatas.start(regex2.id):dynamicDatas.end(regex2.id)], HexaString, ASCII)
, what's up ?
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello netzob, what's up ?
"""
def __init__(self, children):
super(NetzobAggregateRegex, self).__init__()
self.__children = TypedList(NetzobRegex)
self.children = children
def __updateRegex(self):
self.regex = "".join([str(child) for child in self.children])
@property
def children(self):
return self.__children
@children.setter
def children(self, children):
self._logger.debug("PAN {0}".format(children))
# for child in children:
# if child is None:
# raise TypeError("No child can be None")
for child in children:
if child is not None:
self.__children.append(child)
self.__updateRegex()
@NetzobLogger
class NetzobAlternativeRegex(NetzobRegex):
"""Represents an alternative regex.
>>> from netzob.all import *
>>> import random
>>> import regex as re
>>> possibleData =["Netzob", "Zoby"]
>>> data = random.choice(possibleData)<|fim▁hole|> >>> regex1 = NetzobRegex.buildRegexForStaticValue("Netzob")
>>> regex2 = NetzobRegex.buildRegexForStaticValue("Zoby")
>>> nRegex = NetzobRegex.buildRegexForAlternativeRegexes([regex1, regex2])
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> matchedData = TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
>>> matchedData in possibleData
True
"""
def __init__(self, children):
super(NetzobAlternativeRegex, self).__init__()
self.__children = TypedList(NetzobRegex)
self.children = children
def __updateRegex(self):
self.regex = "|".join([str(child) for child in self.children])
@property
def children(self):
return self.__children
@children.setter
def children(self, children):
for child in children:
if child is None:
raise TypeError("No child can be None")
for child in children:
self.__children.append(child)
self.__updateRegex()
@NetzobLogger
class NetzobStaticRegex(NetzobRegex):
"""Represents a regex with a static value.
Example of a static regex:
>>> from netzob.all import *
>>> import regex as re
>>> data = "Hello netzob !"
>>> hexData = TypeConverter.convert(data, ASCII, HexaString)
>>> nRegex = NetzobRegex.buildRegexForStaticValue("Hello netzob !")
>>> compiledRegex = re.compile(str(nRegex))
>>> dynamicDatas = compiledRegex.match(hexData)
>>> print TypeConverter.convert(hexData[dynamicDatas.start(nRegex.id):dynamicDatas.end(nRegex.id)], HexaString, ASCII)
Hello netzob !
    To illustrate that only an hexastring can be specified:
>>> regex = NetzobStaticRegex("toto")
Traceback (most recent call last):
...
    ValueError: The specified value cannot be parsed as an HexaString: toto
"""
def __init__(self, staticValue):
super(NetzobStaticRegex, self).__init__()
self.staticValue = staticValue
def __updateRegex(self):
"""This method must be called everytime the static
value is modified"""
self.regex = self.staticValue
@property
def staticValue(self):
"""The static value which current regex represents.
:type: str: an hexastring
:raise: TypeError is param not valid
"""
return self.__staticValue
@staticValue.setter
@typeCheck(str)
def staticValue(self, staticValue):
if staticValue is None:
raise TypeError("Static value cannot be None")
if not HexaString().canParse(staticValue):
raise ValueError("The specified value cannot be parse as an HexaString: {0}".format(str(staticValue)))
self.__staticValue = staticValue.lower()
self.__updateRegex()<|fim▁end|>
|
>>> hexData = TypeConverter.convert(data, ASCII, HexaString)
|
<|file_name|>astar.rs<|end_file_name|><|fim▁begin|>use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::collections::{BinaryHeap, HashMap};
use std::hash::Hash;
use super::visit::{EdgeRef, GraphBase, IntoEdges, VisitMap, Visitable};
use crate::scored::MinScored;
use crate::algo::Measure;
/// \[Generic\] A* shortest path algorithm.
///
/// Computes the shortest path from `start` to `finish`, including the total path cost.
///
/// `finish` is implicitly given via the `is_goal` callback, which should return `true` if the
/// given node is the finish node.
///
/// The function `edge_cost` should return the cost for a particular edge. Edge costs must be
/// non-negative.
///
/// The function `estimate_cost` should return the estimated cost to the finish for a particular
/// node. For the algorithm to find the actual shortest path, it should be admissible, meaning that
/// it should never overestimate the actual cost to get to the nearest goal node. Estimate costs
/// must also be non-negative.
///
/// The graph should be `Visitable` and implement `IntoEdges`.
///
/// # Example
/// ```
/// use petgraph::Graph;
/// use petgraph::algo::astar;
///
/// let mut g = Graph::new();<|fim▁hole|>/// let e = g.add_node((3., 3.));
/// let f = g.add_node((4., 2.));
/// g.extend_with_edges(&[
/// (a, b, 2),
/// (a, d, 4),
/// (b, c, 1),
/// (b, f, 7),
/// (c, e, 5),
/// (e, f, 1),
/// (d, e, 1),
/// ]);
///
/// // Graph represented with the weight of each edge
/// // Edges with '*' are part of the optimal path.
/// //
/// // 2 1
/// // a ----- b ----- c
/// // | 4* | 7 |
/// // d f | 5
/// // | 1* | 1* |
/// // \------ e ------/
///
/// let path = astar(&g, a, |finish| finish == f, |e| *e.weight(), |_| 0);
/// assert_eq!(path, Some((6, vec![a, d, e, f])));
/// ```
///
/// Returns the total cost + the path of subsequent `NodeId` from start to finish, if one was
/// found.
pub fn astar<G, F, H, K, IsGoal>(
graph: G,
start: G::NodeId,
mut is_goal: IsGoal,
mut edge_cost: F,
mut estimate_cost: H,
) -> Option<(K, Vec<G::NodeId>)>
where
G: IntoEdges + Visitable,
IsGoal: FnMut(G::NodeId) -> bool,
G::NodeId: Eq + Hash,
F: FnMut(G::EdgeRef) -> K,
H: FnMut(G::NodeId) -> K,
K: Measure + Copy,
{
let mut visited = graph.visit_map();
let mut visit_next = BinaryHeap::new();
let mut scores = HashMap::new();
let mut path_tracker = PathTracker::<G>::new();
let zero_score = K::default();
scores.insert(start, zero_score);
visit_next.push(MinScored(estimate_cost(start), start));
while let Some(MinScored(_, node)) = visit_next.pop() {
if is_goal(node) {
let path = path_tracker.reconstruct_path_to(node);
let cost = scores[&node];
return Some((cost, path));
}
// Don't visit the same node several times, as the first time it was visited it was using
// the shortest available path.
if !visited.visit(node) {
continue;
}
// This lookup can be unwrapped without fear of panic since the node was necessarily scored
        // before adding it to `visit_next`.
let node_score = scores[&node];
for edge in graph.edges(node) {
let next = edge.target();
if visited.is_visited(&next) {
continue;
}
let mut next_score = node_score + edge_cost(edge);
match scores.entry(next) {
Occupied(ent) => {
let old_score = *ent.get();
if next_score < old_score {
*ent.into_mut() = next_score;
path_tracker.set_predecessor(next, node);
} else {
next_score = old_score;
}
}
Vacant(ent) => {
ent.insert(next_score);
path_tracker.set_predecessor(next, node);
}
}
let next_estimate_score = next_score + estimate_cost(next);
visit_next.push(MinScored(next_estimate_score, next));
}
}
None
}
struct PathTracker<G>
where
G: GraphBase,
G::NodeId: Eq + Hash,
{
came_from: HashMap<G::NodeId, G::NodeId>,
}
impl<G> PathTracker<G>
where
G: GraphBase,
G::NodeId: Eq + Hash,
{
fn new() -> PathTracker<G> {
PathTracker {
came_from: HashMap::new(),
}
}
fn set_predecessor(&mut self, node: G::NodeId, previous: G::NodeId) {
self.came_from.insert(node, previous);
}
fn reconstruct_path_to(&self, last: G::NodeId) -> Vec<G::NodeId> {
let mut path = vec![last];
let mut current = last;
while let Some(&previous) = self.came_from.get(¤t) {
path.push(previous);
current = previous;
}
path.reverse();
path
}
}<|fim▁end|>
|
/// let a = g.add_node((0., 0.));
/// let b = g.add_node((2., 0.));
/// let c = g.add_node((1., 1.));
/// let d = g.add_node((0., 2.));
|
<|file_name|>BoundaryLocationTest.js<|end_file_name|><|fim▁begin|>asynctest(
'browser.tinymce.core.keyboard.BoundaryLocationTest',
[
'ephox.agar.api.Assertions',
'ephox.agar.api.GeneralSteps',
'ephox.agar.api.Logger',
'ephox.agar.api.Pipeline',
'ephox.agar.api.Step',
'ephox.katamari.api.Fun',
'ephox.sugar.api.dom.Hierarchy',
'ephox.sugar.api.node.Element',
'ephox.sugar.api.search.Selectors',
'tinymce.core.caret.CaretPosition',
'tinymce.core.keyboard.BoundaryLocation',
'tinymce.core.test.ViewBlock',
'tinymce.core.text.Zwsp'
],
function (Assertions, GeneralSteps, Logger, Pipeline, Step, Fun, Hierarchy, Element, Selectors, CaretPosition, BoundaryLocation, ViewBlock, Zwsp) {
var success = arguments[arguments.length - 2];
var failure = arguments[arguments.length - 1];
var ZWSP = Zwsp.ZWSP;
var viewBlock = ViewBlock();
var createViewElement = function (html) {
viewBlock.update(html);
return Element.fromDom(viewBlock.get());
};
var createLocation = function (elm, elementPath, offset) {
var container = Hierarchy.follow(elm, elementPath);
var pos = new CaretPosition(container.getOrDie().dom(), offset);
var location = BoundaryLocation.readLocation(elm.dom(), pos);
return location;
};
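    // A "location" is an optional value resolving to one of four positions
    // relative to an inline boundary element -- before, start, end or after --
    // as unpacked by locationName/locationElement below.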
var createPosition = function (elm, elementPath, offset) {
var container = Hierarchy.follow(elm, elementPath);<|fim▁hole|>
var locationName = function (location) {
return location.fold(
Fun.constant('before'),
Fun.constant('start'),
Fun.constant('end'),
Fun.constant('after')
);
};
var locationElement = function (location) {
return Element.fromDom(location.fold(
Fun.identity,
Fun.identity,
Fun.identity,
Fun.identity
));
};
var sTestValidLocation = function (html, elementPath, offset, expectedLocationName, expectedInline) {
return Step.sync(function () {
var elm = createViewElement(html);
var location = createLocation(elm, elementPath, offset);
Assertions.assertEq('Should be a valid location: ' + html, true, location.isSome());
Assertions.assertEq('Should be expected location', expectedLocationName, locationName(location.getOrDie()));
Assertions.assertDomEq('Should be expected element', Selectors.one(expectedInline, elm).getOrDie(), locationElement(location.getOrDie()));
});
};
var sTestInvalidLocation = function (html, elementPath, offset) {
return Step.sync(function () {
var elm = createViewElement(html);
var location = createLocation(elm, elementPath, offset);
Assertions.assertEq('Should not be a valid location: ' + html, true, location.isNone());
});
};
var sTestFindLocation = function (forward, html, elementPath, offset, expectedLocationName, expectedInline) {
return Step.sync(function () {
var elm = createViewElement(html);
var position = createPosition(elm, elementPath, offset);
var location = forward ? BoundaryLocation.nextLocation(elm.dom(), position) : BoundaryLocation.prevLocation(elm.dom(), position);
Assertions.assertDomEq('Should be expected element', Selectors.one(expectedInline, elm).getOrDie(), locationElement(location.getOrDie()));
Assertions.assertEq('Should be a valid location: ' + html, true, location.isSome());
Assertions.assertEq('Should be expected location', expectedLocationName, locationName(location.getOrDie()));
});
};
var sTestFindLocationInvalid = function (forward, html, elementPath, offset) {
return Step.sync(function () {
var elm = createViewElement(html);
var position = createPosition(elm, elementPath, offset);
var location = forward ? BoundaryLocation.nextLocation(elm.dom(), position) : BoundaryLocation.prevLocation(elm.dom(), position);
Assertions.assertEq('Should not be a valid location: ' + html, true, location.isNone());
});
};
var sTestPrevLocation = Fun.curry(sTestFindLocation, false);
var sTestNextLocation = Fun.curry(sTestFindLocation, true);
var sTestPrevLocationInvalid = Fun.curry(sTestFindLocationInvalid, false);
var sTestNextLocationInvalid = Fun.curry(sTestFindLocationInvalid, true);
var sTestValidLocations = Logger.t('sTestValidLocations', GeneralSteps.sequence([
sTestValidLocation('<p><a href="a">a</a></p>', [0], 0, 'before', 'a'),
sTestValidLocation('<p><a href="a">a</a></p>', [0, 0, 0], 0, 'start', 'a'),
sTestValidLocation('<p><a href="a">a</a></p>', [0, 0, 0], 1, 'end', 'a'),
sTestValidLocation('<p><a href="a">a</a></p>', [0], 1, 'after', 'a'),
sTestValidLocation('<p>a<a href="a">a</a></p>', [0, 0], 1, 'before', 'a'),
sTestValidLocation('<p><a href="a">a</a>a</p>', [0, 1], 0, 'after', 'a'),
sTestValidLocation('<p><a href="a">ab</a></p>', [0, 0, 0], 0, 'start', 'a'),
sTestValidLocation('<p><a href="a">ab</a></p>', [0, 0, 0], 2, 'end', 'a'),
sTestValidLocation('<p><img src="a"><a href="a">a</a></p>', [0], 1, 'before', 'a'),
sTestValidLocation('<p><a href="a"><img src="a"></a></p>', [0, 0], 0, 'start', 'a'),
sTestValidLocation('<p><a href="a"><img src="a"></a></p>', [0, 0], 1, 'end', 'a'),
sTestValidLocation('<p><a href="a">a</a><img src="a"></p>', [0], 1, 'after', 'a'),
sTestValidLocation('<p><a href="a">a</a></p><p><a href="b">b</a></p>', [0], 1, 'after', 'a'),
sTestValidLocation('<p><a href="a">a</a></p><p><a href="b">b</a></p>', [1], 0, 'before', 'p:nth-child(2) a')
]));
var sTestValidZwspLocations = Logger.t('sTestValidZwspLocations', GeneralSteps.sequence([
sTestValidLocation('<p>' + ZWSP + '<a href="a">a</a></p>', [0, 0], 0, 'before', 'a'),
sTestValidLocation('<p><a href="a">' + ZWSP + 'a</a></p>', [0, 0, 0], 1, 'start', 'a'),
sTestValidLocation('<p><a href="a">a' + ZWSP + '</a></p>', [0, 0, 0], 1, 'end', 'a'),
sTestValidLocation('<p><a href="a">a</a>' + ZWSP + '</p>', [0, 1], 1, 'after', 'a')
]));
var sTestInvalidLocations = Logger.t('sTestInvalidLocations', GeneralSteps.sequence([
sTestInvalidLocation('<p>a</p>', [0, 0], 0),
sTestInvalidLocation('<p><b>a</b></p>', [0], 0),
sTestInvalidLocation('<p><b>a</b></p>', [0], 1),
sTestInvalidLocation('<p>a<a href="a">a</a>b</p>', [0, 0], 0),
sTestInvalidLocation('<p>a<a href="a">a</a>b</p>', [0, 2], 1),
sTestInvalidLocation('<p><img src="a"><a href="a">a</a></p>', [0], 0),
sTestInvalidLocation('<p><a href="a">a</a><img src="a"></p>', [0], 2),
sTestInvalidLocation('<p><a href="a"><img src="a"><img src="a"></a><img src="a"></p>', [0, 0], 1),
sTestInvalidLocation('<p dir="rtl"><a href="a">a</a></p>', [0, 0, 0], 0),
sTestInvalidLocation('<p><a href="a">\u05D4</a></p>', [0, 0, 0], 0)
]));
var sTestPrevLocations = Logger.t('sTestPrevLocations', GeneralSteps.sequence([
sTestPrevLocation('<p><a href="a">a</a>b</p>', [0, 1], 1, 'after', 'a'),
sTestPrevLocation('<p><a href="a">a</a></p>', [0], 1, 'end', 'a'),
sTestPrevLocation('<p><a href="a">a</a></p>', [0, 0, 0], 1, 'start', 'a'),
sTestPrevLocation('<p><a href="a">a</a></p>', [0, 0, 0], 0, 'before', 'a'),
sTestPrevLocation('<p><a href="a"><img src="about:blank"></a></p>', [0], 1, 'end', 'a'),
sTestPrevLocation('<p><a href="a"><img src="about:blank"></a></p>', [0, 0], 1, 'start', 'a'),
sTestPrevLocation('<p><a href="a"><img src="about:blank"></a></p>', [0, 0], 0, 'before', 'a')
]));
var sTestPrevLocationsBetweenInlines = Logger.t('sTestPrevLocationsBetweenInlines', GeneralSteps.sequence([
sTestPrevLocation('<p><a href="a">a</a><a href="b">b</a></p>', [0, 1, 0], 0, 'before', 'a:nth-child(2)')
]));
var sTestPrevLocationsBetweenBlocks = Logger.t('sTestPrevLocationsBetweenBlocks', GeneralSteps.sequence([
sTestPrevLocation('<p><a href="a">a</a></p><p><a href="b">b</a></p>', [1], 0, 'end', 'p:nth-child(1) a'),
sTestPrevLocation('<p><a href="a">a</a></p><p><a href="b">b</a></p>', [1, 0, 0], 0, 'before', 'p:nth-child(2) a'),
sTestPrevLocation('<p><a href="a">a</a>b</p><p><a href="c">c</a></p>', [1, 0, 0], 0, 'before', 'p:nth-child(2) a'),
sTestPrevLocation('<p><a href="a">a</a><br /></p><p><a href="c">c</a></p>', [1], 0, 'after', 'p:nth-child(1) a'),
sTestPrevLocationInvalid('<p><a href="a">a</a></p><p>b<a href="c">c</a></p>', [1, 0], 1),
sTestPrevLocationInvalid('<p><a href="a">a</a>b</p><p><a href="c">c</a></p>', [1], 0)
]));
var sTestPrevZwspLocations = Logger.t('sTestPrevLocations', GeneralSteps.sequence([
sTestPrevLocation('<p><a href="a">a</a>' + ZWSP + 'b</p>', [0, 1], 2, 'after', 'a'),
sTestPrevLocation('<p><a href="a">a</a>' + ZWSP + '</p>', [0, 1], 1, 'end', 'a'),
sTestPrevLocation('<p><a href="a">a' + ZWSP + '</a></p>', [0, 0, 0], 1, 'start', 'a'),
sTestPrevLocation('<p><a href="a">' + ZWSP + 'a</a></p>', [0, 0, 0], 1, 'before', 'a')
]));
var sTestNextLocations = Logger.t('sTestNextLocations', GeneralSteps.sequence([
sTestNextLocation('<p>a<a href="a">b</a></p>', [0, 0], 0, 'before', 'a'),
sTestNextLocation('<p><a href="a">a</a></p>', [0], 0, 'start', 'a'),
sTestNextLocation('<p><a href="a">a</a></p>', [0, 0, 0], 0, 'end', 'a'),
sTestNextLocation('<p><a href="a">a</a></p>', [0, 0, 0], 1, 'after', 'a'),
sTestNextLocation('<p><a href="a"><img src="about:blank"></a></p>', [0], 0, 'start', 'a'),
sTestNextLocation('<p><a href="a"><img src="about:blank"></a></p>', [0, 0], 0, 'end', 'a'),
sTestNextLocation('<p><a href="a"><img src="about:blank"></a></p>', [0, 0], 1, 'after', 'a')
]));
var sTestNextLocationsBetweenInlines = Logger.t('sTestNextLocationsBetweenInlines', GeneralSteps.sequence([
sTestNextLocation('<p><a href="a">a</a><a href="a">b</a></p>', [0, 0, 0], 1, 'after', 'a:nth-child(1)')
]));
var sTestNextLocationsBetweenBlocks = Logger.t('sTestNextLocationsBetweenBlocks', GeneralSteps.sequence([
sTestNextLocation('<p><a href="a">a</a></p><p><a href="b">b</a></p>', [0], 1, 'start', 'p:nth-child(2) a'),
sTestNextLocation('<p><a href="a">a</a></p><p><a href="b">b</a></p>', [0, 0, 0], 1, 'after', 'p:nth-child(1) a'),
sTestNextLocationInvalid('<p><a href="a">a</a>b</p><p><a href="c">c</a></p>', [0, 1], 0),
sTestNextLocationInvalid('<p><a href="a">a</a></p><p>b<a href="c">c</a></p>', [0], 1)
]));
var sTestNextZwspLocations = Logger.t('sTestNextZwspLocations', GeneralSteps.sequence([
sTestNextLocation('<p>a' + ZWSP + '<a href="a">b</a></p>', [0, 0], 0, 'before', 'a'),
sTestNextLocation('<p>' + ZWSP + '<a href="a">a</a></p>', [0], 0, 'start', 'a'),
sTestNextLocation('<p><a href="a">' + ZWSP + 'a</a></p>', [0, 0, 0], 1, 'end', 'a'),
sTestNextLocation('<p><a href="a">a' + ZWSP + '</a></p>', [0, 0, 0], 1, 'after', 'a')
]));
viewBlock.attach();
Pipeline.async({}, [
sTestValidLocations,
sTestValidZwspLocations,
sTestInvalidLocations,
sTestPrevLocations,
sTestPrevLocationsBetweenInlines,
sTestPrevLocationsBetweenBlocks,
sTestPrevZwspLocations,
sTestNextLocations,
sTestNextLocationsBetweenInlines,
sTestNextLocationsBetweenBlocks,
sTestNextZwspLocations
], function () {
viewBlock.detach();
success();
}, failure);
}
);<|fim▁end|>
|
return new CaretPosition(container.getOrDie().dom(), offset);
};
|
<|file_name|>constants.py<|end_file_name|><|fim▁begin|>"""
sphinxit.core.constants
~~~~~~~~~~~~~~~~~~~~~~~
Defines some Sphinx-specific constants.
:copyright: (c) 2013 by Roman Semirook.
:license: BSD, see LICENSE for more details.
"""
from collections import namedtuple
RESERVED_KEYWORDS = (
'AND',
'AS',
'ASC',
'AVG',
'BEGIN',
'BETWEEN',
'BY',
'CALL',
'COLLATION',
'COMMIT',
'COUNT',
'DELETE',
'DESC',
'DESCRIBE',
'DISTINCT',
'FALSE',
'FROM',
'GLOBAL',
'GROUP',
'IN',
'INSERT',
'INTO',
'LIMIT',
'MATCH',
'MAX',
'META',
'MIN',
'NOT',
'NULL',<|fim▁hole|> 'ROLLBACK',
'SELECT',
'SET',
'SHOW',
'START',
'STATUS',
'SUM',
'TABLES',
'TRANSACTION',
'TRUE',
'UPDATE',
'VALUES',
'VARIABLES',
'WARNINGS',
'WEIGHT',
'WHERE',
'WITHIN'
)
ESCAPED_CHARS = namedtuple('EscapedChars', ['single_escape', 'double_escape'])(
single_escape=("'", '+', '[', ']', '=', '*'),
double_escape=('@', '!', '^', '(', ')', '~', '-', '|', '/', '<<', '$', '"')
)
NODES_ORDER = namedtuple('NodesOrder', ['select', 'update'])(
select=(
'SelectFrom',
'Where',
'GroupBy',
'OrderBy',
'WithinGroupOrderBy',
'Limit',
'Options'
),
update=(
'UpdateSet',
'Where',
'Options'
)
)<|fim▁end|>
|
'OPTION',
'OR',
'ORDER',
'REPLACE',
|
<|file_name|>committees.py<|end_file_name|><|fim▁begin|>import re
import lxml.html
from pupa.scrape import Scraper, Organization
class WYCommitteeScraper(Scraper):
members = {}
urls = {
"list": "http://legisweb.state.wy.us/LegbyYear/CommitteeList.aspx?Year=%s",
"detail": "http://legisweb.state.wy.us/LegbyYear/%s",
}
def scrape(self, session=None):
if session is None:
session = self.latest_session()
self.info('no session specified, using %s', session)
list_url = self.urls["list"] % (session, )
committees = {}
page = self.get(list_url).text
page = lxml.html.fromstring(page)
for el in page.xpath(".//a[contains(@href, 'CommitteeMembers')]"):
committees[el.text.strip()] = el.get("href")
for c in committees:
self.info(c)
detail_url = self.urls["detail"] % (committees[c],)
page = self.get(detail_url).text
page = lxml.html.fromstring(page)
            # committee names sometimes carry a numeric prefix, e.g.
            # "02-Judiciary"; strip it so only the name remains
            if re.match(r'\d{1,2}-', c):
                c = c.split('-', 1)[1]
jcomm = Organization(name=c.strip(), chamber='joint', classification='committee')
for table in page.xpath(".//table[contains(@id, 'CommitteeMembers')]"):
rows = table.xpath(".//tr")
chamber = rows[0].xpath('.//td')[0].text_content().strip()
chamber = 'upper' if chamber == 'Senator' else 'lower'
comm = Organization(name=c.strip(), chamber=chamber, classification='committee')<|fim▁hole|> role = 'chairman' if tds[3].text_content().strip() == 'Chairman' else 'member'
comm.add_member(name, role)
jcomm.add_member(name, role)
comm.add_source(detail_url)
yield comm
jcomm.add_source(detail_url)
yield jcomm<|fim▁end|>
|
for row in rows[1:]:
tds = row.xpath('.//td')
name = tds[0].text_content().strip()
|
<|file_name|>issue-3036.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing that semicolon tokens are printed correctly in errors
<|fim▁hole|>} //~ ERROR: expected one of `.`, `;`, or an operator, found `}`<|fim▁end|>
|
fn main()
{
let x = 3
|
<|file_name|>Plugins.py<|end_file_name|><|fim▁begin|>#
#
# (C) Copyright 2001 The Internet (Aust) Pty Ltd
# ACN: 082 081 472 ABN: 83 082 081 472
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Andrew Milton <[email protected]>
# $Id: Plugins.py,v 1.5 2004/11/10 14:15:33 akm Exp $
import App, Globals, OFS
import string<|fim▁hole|>import time
from Globals import ImageFile, HTMLFile, HTML, MessageDialog, package_home
from OFS.Folder import Folder
class PluginRegister:
def __init__(self, name, description, pluginClass,
pluginStartForm, pluginStartMethod,
pluginEditForm=None, pluginEditMethod=None):
self.name=name #No Spaces please...
self.description=description
self.plugin=pluginClass
self.manage_addForm=pluginStartForm
self.manage_addMethod=pluginStartMethod
self.manage_editForm=pluginEditForm
self.manage_editMethod=pluginEditMethod
class CryptoPluginRegister:
def __init__(self, name, crypto, description, pluginMethod):
self.name = name #No Spaces please...
self.cryptoMethod = crypto
self.description = description
self.plugin = pluginMethod<|fim▁end|>
| |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSearch Flask Blueprint."""
import cStringIO
from functools import wraps
from flask import g, render_template, request, flash, redirect, url_for, \
current_app, abort, Blueprint, send_file
from flask_breadcrumbs import register_breadcrumb
from flask_breadcrumbs import default_breadcrumb_root
from flask_login import current_user
from flask_menu import register_menu
from invenio.base.decorators import wash_arguments
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.base.signals import pre_template_render
from invenio.config import CFG_SITE_RECORD
from invenio.ext.template.context_processor import \
register_template_context_processor
from invenio.modules.search.models import Collection
from invenio.modules.search.signals import record_viewed
from invenio.utils import apache
from .api import get_record
from .models import Record as Bibrec
from .utils import references_nb_counts, citations_nb_counts, \
visible_collection_tabs
blueprint = Blueprint('record', __name__, url_prefix="/" + CFG_SITE_RECORD,
static_url_path='/record', template_folder='templates',
static_folder='static')
default_breadcrumb_root(blueprint, 'breadcrumbs.record')
def request_record(f):
"""Perform standard operation to check record availability for user."""
@wraps(f)
def decorated(recid, *args, **kwargs):
from invenio.modules.access.mailcookie import \
mail_cookie_create_authorize_action
from invenio.modules.access.local_config import VIEWRESTRCOLL
from invenio.legacy.search_engine import \
guess_primary_collection_of_a_record, \
check_user_can_view_record
from invenio.b2share.modules.main.utils import check_fresh_record
# ensure recid to be integer
recid = int(recid)
from invenio.legacy.search_engine import record_exists, get_merged_recid
if record_exists(recid) == 0:
# record doesn't exist, abort so it doesn't get incorrectly cached
abort(apache.HTTP_NOT_FOUND) # The record is gone!
if check_fresh_record(current_user, recid):
return render_template('record_waitforit.html', recid=recid)
g.collection = collection = Collection.query.filter(
Collection.name == guess_primary_collection_of_a_record(recid)).\
one()
(auth_code, auth_msg) = check_user_can_view_record(current_user, recid)
# only superadmins can use verbose parameter for obtaining debug
# information
if not current_user.is_super_admin and 'verbose' in kwargs:
kwargs['verbose'] = 0
if auth_code and current_user.is_guest:
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {
'collection': g.collection.name})
url_args = {'action': cookie, 'ln': g.ln, 'referer': request.url}
flash(_("Authorization failure"), 'error')
return redirect(url_for('webaccount.login', **url_args))<|fim▁hole|> from invenio.legacy.search_engine import record_exists, \
get_merged_recid
# check if the current record has been deleted
# and has been merged, case in which the deleted record
# will be redirect to the new one
record_status = record_exists(recid)
merged_recid = get_merged_recid(recid)
if record_status == -1 and merged_recid:
return redirect(url_for('record.metadata', recid=merged_recid))
elif record_status == -1:
abort(apache.HTTP_GONE) # The record is gone!
g.bibrec = Bibrec.query.get(recid)
record = get_record(recid)
if record is None:
return render_template('404.html')
title = record.get(cfg.get('RECORDS_BREADCRUMB_TITLE_KEY'), '')
tabs = []
if cfg.get('CFG_WEBLINKBACK_TRACKBACK_ENABLED'):
@register_template_context_processor
def trackback_context():
from invenio.legacy.weblinkback.templates import \
get_trackback_auto_discovery_tag
return {'headerLinkbackTrackbackLink':
get_trackback_auto_discovery_tag(recid)}
def _format_record(recid, of='hd', user_info=current_user, *args,
**kwargs):
from invenio.modules.formatter import format_record
return format_record(recid, of, user_info=user_info, *args,
**kwargs)
@register_template_context_processor
def record_context():
from invenio.modules.comments.api import get_mini_reviews
from invenio.legacy.bibdocfile.api import BibRecDocs
all_files = [f for f in BibRecDocs(recid, human_readable=True).list_latest_files(list_hidden=False) \
if not f.is_icon()]
files = [f for f in all_files if f.is_restricted(current_user)[0] == 0]
has_private_files = len(files) < len(all_files)
return dict(recid=recid,
record=record,
tabs=tabs,
title=title,
get_mini_reviews=get_mini_reviews,
collection=collection,
format_record=_format_record,
has_private_files=has_private_files,
files=files
)
pre_template_render.send(
"%s.%s" % (blueprint.name, f.__name__),
recid=recid,
)
return f(recid, *args, **kwargs)
return decorated
@blueprint.route('/<int:recid>/metadata', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/export/<of>', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.', _('Record'))
@wash_arguments({'of': (unicode, 'hd'), 'ot': (unicode, None)})
@request_record
@register_menu(blueprint, 'record.metadata', _('Information'), order=1,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('metadata'))
def metadata(recid, of='hd', ot=None):
"""Display formated record metadata."""
from invenio.legacy.bibrank.downloads_similarity import \
register_page_view_event
from invenio.modules.formatter import get_output_format_content_type
register_page_view_event(recid, current_user.get_id(),
str(request.remote_addr))
if get_output_format_content_type(of) != 'text/html':
from invenio.modules.search.views.search import \
response_formated_records
return response_formated_records([recid], g.collection, of, qid=None)
# Send the signal 'document viewed'
record_viewed.send(
current_app._get_current_object(),
recid=recid,
id_user=current_user.get_id(),
request=request)
from invenio.b2share.modules.b2deposit.edit import is_record_editable
return render_template('records/metadata.html', of=of, ot=ot,
editable=is_record_editable(recid))
@blueprint.route('/<int:recid>/references', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.references', _('References'), order=2,
visible_when=visible_collection_tabs('references'),
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
count=references_nb_counts)
def references(recid):
"""Display references."""
return render_template('records/references.html')
@blueprint.route('/<int:recid>/files', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.files', _('Files'), order=8,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('files'))
def files(recid):
"""Return overview of attached files."""
def get_files():
from invenio.legacy.bibdocfile.api import BibRecDocs
for bibdoc in BibRecDocs(recid).list_bibdocs():
for file in bibdoc.list_all_files():
yield file.get_url()
return render_template('records/files.html', files=list(get_files()))
@blueprint.route('/<int:recid>/files/<path:filename>', methods=['GET'])
@request_record
def file(recid, filename):
"""Serve attached documents."""
from invenio.modules.documents import api
record = get_record(recid)
duuids = [uuid for (k, uuid) in record.get('_documents', [])
if k == filename]
error = 404
for duuid in duuids:
document = api.Document.get_document(duuid)
if not document.is_authorized(current_user):
current_app.logger.info(
"Unauthorized access to /{recid}/files/{filename} "
"({document}) by {current_user}".format(
recid=recid, filename=filename, document=document,
current_user=current_user))
error = 401
continue
# TODO add logging of downloads
if document.get('linked', False):
if document.get('uri').startswith('http://') or \
document.get('uri').startswith('https://'):
return redirect(document.get('uri'))
# FIXME create better streaming support
file_ = cStringIO.StringIO(document.open('rb').read())
file_.seek(0)
return send_file(file_, mimetype='application/octet-stream',
attachment_filename=filename)
return send_file(document['uri'])
abort(error)
@blueprint.route('/<int:recid>/citations', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.citations', _('Citations'), order=3,
visible_when=visible_collection_tabs('citations'),
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
count=citations_nb_counts)
def citations(recid):
"""Display citations."""
from invenio.legacy.bibrank.citation_searcher import calculate_cited_by_list,\
get_self_cited_by, calculate_co_cited_with_list
citations = dict(
citinglist=calculate_cited_by_list(recid),
selfcited=get_self_cited_by(recid),
co_cited=calculate_co_cited_with_list(recid)
)
return render_template('records/citations.html',
citations=citations)
@blueprint.route('/<int:recid>/keywords', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.keywords', _('Keywords'), order=4,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('keywords'))
def keywords(recid):
"""Return keywords overview."""
from invenio.legacy.bibclassify.webinterface import record_get_keywords
found, keywords, record = record_get_keywords(recid)
return render_template('records/keywords.html',
found=found,
keywords=keywords)
@blueprint.route('/<int:recid>/usage', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.usage', _('Usage statistics'), order=7,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('usage'))
def usage(recid):
"""Return usage statistics."""
from invenio.legacy.bibrank.downloads_similarity import \
calculate_reading_similarity_list
from invenio.legacy.bibrank.downloads_grapher import \
create_download_history_graph_and_box
viewsimilarity = calculate_reading_similarity_list(recid, "pageviews")
downloadsimilarity = calculate_reading_similarity_list(recid, "downloads")
downloadgraph = create_download_history_graph_and_box(recid)
return render_template('records/usage.html',
viewsimilarity=viewsimilarity,
downloadsimilarity=downloadsimilarity,
downloadgraph=downloadgraph)
@blueprint.route('/', methods=['GET', 'POST'])
def no_recid():
"""Redirect to homepage."""
return redirect("/")<|fim▁end|>
|
elif auth_code:
flash(auth_msg, 'error')
abort(apache.HTTP_UNAUTHORIZED)
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
"""Intesishome platform."""
|
<|file_name|>LdapInjectionJndi.java<|end_file_name|><|fim▁begin|>import javax.naming.directory.DirContext;
import org.owasp.esapi.Encoder;
import org.owasp.esapi.reference.DefaultEncoder;
public void ldapQueryBad(HttpServletRequest request, DirContext ctx) throws NamingException {
String organizationName = request.getParameter("organization_name");
String username = request.getParameter("username");
// BAD: User input used in DN (Distinguished Name) without encoding
String dn = "OU=People,O=" + organizationName;
// BAD: User input used in search filter without encoding
String filter = "username=" + userName;
ctx.search(dn, filter, new SearchControls());
}
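// Illustrative attack (hypothetical input) that the encoding below guards
// against: a username such as "*)(uid=*" alters the structure of the raw
// filter string built above and can widen the search to unintended entries.
// ESAPI's encodeForLDAP escapes LDAP filter metacharacters (e.g. '*', '(',
// ')') so such input is only ever matched literally.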
public void ldapQueryGood(HttpServletRequest request, DirContext ctx) throws NamingException {<|fim▁hole|>
// ESAPI encoder
Encoder encoder = DefaultEncoder.getInstance();
// GOOD: Organization name is encoded before being used in DN
String safeOrganizationName = encoder.encodeForDN(organizationName);
String safeDn = "OU=People,O=" + safeOrganizationName;
// GOOD: User input is encoded before being used in search filter
String safeUsername = encoder.encodeForLDAP(username);
String safeFilter = "username=" + safeUsername;
ctx.search(safeDn, safeFilter, new SearchControls());
}<|fim▁end|>
|
String organizationName = request.getParameter("organization_name");
String username = request.getParameter("username");
|
<|file_name|>choice.py<|end_file_name|><|fim▁begin|>import psidialogs
<|fim▁hole|>s = psidialogs.choice(["1", "2", "3"], "Choose a number!")
if s is not None:
print(s)<|fim▁end|>
| |
<|file_name|>gsdSubsample.py<|end_file_name|><|fim▁begin|>#open a gsd file and write out a subsampled version, keeping only every Nth frame
#useful if you want to analyze a shorter trajectory
import gsd.hoomd
import argparse
import time<|fim▁hole|>parser.add_argument('ofname',metavar='output',type=str,help='where to write subsampled trajectory file')
parser.add_argument('N',metavar='N',type=int,help='keep frame each N timesteps')
args = parser.parse_args()
traj = gsd.hoomd.open(args.fname)
frame0 = traj[0]
newtraj = gsd.hoomd.open(args.ofname,'wb')
newtraj.append(frame0)
for i in range(args.N,len(traj),args.N):
s = gsd.hoomd.Snapshot()
pos = traj[i].particles.position
s.particles.position = pos
s.particles.N = len(pos)
newtraj.append(s)
end = time.time()
print('Subsampling took {0} s.'.format(end-start))<|fim▁end|>
|
start = time.time()
parser = argparse.ArgumentParser(description='Subsample GSD trajectory')
parser.add_argument('fname',metavar='input',type=str,help='trajectory file to be subsampled')
|
<|file_name|>stock.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe, random, erpnext
from frappe.desk import query_report
from erpnext.stock.stock_ledger import NegativeStockError
from erpnext.stock.doctype.serial_no.serial_no import SerialNoRequiredError, SerialNoQtyError
from erpnext.stock.doctype.batch.batch import UnableToSelectBatchError
from erpnext.stock.doctype.delivery_note.delivery_note import make_sales_return
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_return
def work():
frappe.set_user(frappe.db.get_global('demo_manufacturing_user'))
make_purchase_receipt()
make_delivery_note()
make_stock_reconciliation()
submit_draft_stock_entries()
make_sales_return_records()
make_purchase_return_records()
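# Each helper below fires only with some probability, so repeated calls to
# work() -- one per simulated day -- yield a varied but plausible stream of
# stock documents; expected failures (negative stock, missing serials) are
# swallowed or rolled back instead of aborting the demo.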
def make_purchase_receipt():
if random.random() < 0.6:
from erpnext.buying.doctype.purchase_order.purchase_order import make_purchase_receipt
report = "Purchase Order Items To Be Received"
po_list =list(set([r[0] for r in query_report.run(report)["result"] if r[0]!="'Total'"]))[:random.randint(1, 10)]
for po in po_list:
pr = frappe.get_doc(make_purchase_receipt(po))
if pr.is_subcontracted=="Yes":
pr.supplier_warehouse = "Supplier - WPL"
pr.posting_date = frappe.flags.current_date
pr.insert()
try:
pr.submit()
except NegativeStockError:
print('Negative stock for {0}'.format(po))
pass
frappe.db.commit()
def make_delivery_note():
# make purchase requests
# make delivery notes (if possible)
if random.random() < 0.6:
from erpnext.selling.doctype.sales_order.sales_order import make_delivery_note
report = "Ordered Items To Be Delivered"
for so in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="'Total'"]))[:random.randint(1, 3)]:
dn = frappe.get_doc(make_delivery_note(so))
dn.posting_date = frappe.flags.current_date
for d in dn.get("items"):
if not d.expense_account:
d.expense_account = ("Cost of Goods Sold - {0}".format(
frappe.get_cached_value('Company', dn.company, 'abbr')))
try:
dn.insert()
dn.submit()
frappe.db.commit()
except (NegativeStockError, SerialNoRequiredError, SerialNoQtyError, UnableToSelectBatchError):
frappe.db.rollback()
def make_stock_reconciliation():
# random set some items as damaged
from erpnext.stock.doctype.stock_reconciliation.stock_reconciliation \
import OpeningEntryAccountError, EmptyStockReconciliationItemsError
if random.random() < 0.4:
stock_reco = frappe.new_doc("Stock Reconciliation")
stock_reco.posting_date = frappe.flags.current_date
stock_reco.company = erpnext.get_default_company()
stock_reco.get_items_for("Stores - WP")
if stock_reco.items:
for item in stock_reco.items:
if item.qty:
item.qty = item.qty - round(random.randint(1, item.qty))
try:
stock_reco.insert()
stock_reco.submit()
frappe.db.commit()
except OpeningEntryAccountError:
frappe.db.rollback()
except EmptyStockReconciliationItemsError:
frappe.db.rollback()
def submit_draft_stock_entries():
from erpnext.stock.doctype.stock_entry.stock_entry import IncorrectValuationRateError, \
DuplicateEntryForWorkOrderError, OperationsNotCompleteError
# try posting older drafts (if exists)
frappe.db.commit()
for st in frappe.db.get_values("Stock Entry", {"docstatus":0}, "name"):
try:
ste = frappe.get_doc("Stock Entry", st[0])
ste.posting_date = frappe.flags.current_date
ste.save()
ste.submit()
frappe.db.commit()
except (NegativeStockError, IncorrectValuationRateError, DuplicateEntryForWorkOrderError,
OperationsNotCompleteError):
frappe.db.rollback()
def make_sales_return_records():
if random.random() < 0.1:
for data in frappe.get_all('Delivery Note', fields=["name"], filters={"docstatus": 1}):
if random.random() < 0.1:
try:
dn = make_sales_return(data.name)
dn.insert()
dn.submit()
frappe.db.commit()<|fim▁hole|> frappe.db.rollback()
def make_purchase_return_records():
if random.random() < 0.1:
for data in frappe.get_all('Purchase Receipt', fields=["name"], filters={"docstatus": 1}):
if random.random() < 0.1:
try:
pr = make_purchase_return(data.name)
pr.insert()
pr.submit()
frappe.db.commit()
except Exception:
frappe.db.rollback()<|fim▁end|>
|
except Exception:
|
<|file_name|>quiz1.py<|end_file_name|><|fim▁begin|>"""Softmax."""
scores = [3.0, 1.0, 0.2]
import numpy as np
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
return np.exp(x) / sum(np.exp(x))
print(softmax(scores))
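# Sanity check: for scores = [3.0, 1.0, 0.2] this prints roughly
# [0.836 0.113 0.051] (exp values 20.09, 2.72 and 1.22 divided by their
# sum 24.03), and the three probabilities sum to 1.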
<|fim▁hole|># Plot softmax curves
import matplotlib.pyplot as plt
x = np.arange(-2.0, 6.0, 0.1)
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])
plt.plot(x, softmax(scores).T, linewidth=2)
plt.show()<|fim▁end|>
| |
<|file_name|>test_purchase.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import datetime
import mock
import gtk
from stoqlib.api import api
from stoq.gui.purchase import PurchaseApp
from stoq.gui.test.baseguitest import BaseGUITest
from stoqlib.domain.purchase import PurchaseItem, PurchaseOrder, PurchaseOrderView
from stoqlib.domain.receiving import (ReceivingOrderItem, ReceivingOrder,
PurchaseReceivingMap)
from stoqlib.gui.dialogs.purchasedetails import PurchaseDetailsDialog
from stoqlib.gui.search.searchresultview import SearchResultListView
from stoqlib.gui.wizards.consignmentwizard import ConsignmentWizard
from stoqlib.gui.wizards.productwizard import ProductCreateWizard
from stoqlib.gui.wizards.purchasefinishwizard import PurchaseFinishWizard
from stoqlib.gui.wizards.purchasequotewizard import QuotePurchaseWizard
from stoqlib.gui.wizards.purchasewizard import PurchaseWizard
from stoqlib.reporting.purchase import PurchaseReport
class TestPurchase(BaseGUITest):
def create_app(self, *args, **kwargs):
app = BaseGUITest.create_app(self, *args, **kwargs)
app.branch_filter.combo.select_item_by_data(None)
return app
def test_initial(self):
app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
self.check_app(app, u'purchase')
def test_select(self):
self.create_purchase_order()
app = self.create_app(PurchaseApp, u'purchase')
results = app.results
results.select(results[0])
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_edit_quote_order(self, run_dialog):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
purchase = self.create_purchase_order()
app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
olist = app.results
olist.select(olist[0])
with mock.patch('stoq.gui.purchase.api', new=self.fake.api):
self.fake.set_retval(purchase)
self.activate(app.NewQuote)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
wizard, store, edit_mode = args
self.assertEquals(wizard, QuotePurchaseWizard)
self.assertTrue(store is not None)
self.assertEquals(edit_mode, None)
@mock.patch('stoq.gui.purchase.PurchaseApp.print_report')
def test_print_report(self, print_report):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
self.activate(app.window.Print)
self.assertEquals(print_report.call_count, 1)
args, kwargs = print_report.call_args
report, results, views = args
self.assertEquals(report, PurchaseReport)
self.assertTrue(isinstance(results, SearchResultListView))
for view in views:
self.assertTrue(isinstance(view, PurchaseOrderView))
@mock.patch('stoq.gui.purchase.PurchaseApp.select_result')
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_new_quote_order(self, new_store, run_dialog, select_result):
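        # Reuse the test's own store so objects created below stay visible.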
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
quotation = self.create_quotation()
quotation.purchase.add_item(self.create_sellable(), 2)
quotation.purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
self.store.retval = olist[0]
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Edit)
run_dialog.assert_called_once_with(PurchaseWizard,
self.store,
quotation.purchase, False)
select_result.assert_called_once_with(olist[0])
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_details_dialog(self, run_dialog):
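        # Start from an empty purchase table so our order is the only row.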
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
olist.double_click(0)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
dialog, store = args
self.assertEquals(dialog, PurchaseDetailsDialog)
self.assertTrue(store is not None)
self.assertEquals(kwargs[u'model'], purchase)
@mock.patch('stoq.gui.purchase.yesno')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_confirm_order(self, new_store, yesno):
new_store.return_value = self.store
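        # Answer "yes" automatically when the app asks for confirmation.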
yesno.return_value = True
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.status = PurchaseOrder.ORDER_PENDING<|fim▁hole|>
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Confirm)
yesno.assert_called_once_with(u'The selected order will be '
u'marked as sent.',
gtk.RESPONSE_YES,
u"Confirm order", u"Don't confirm")
self.assertEquals(purchase.status, PurchaseOrder.ORDER_CONFIRMED)
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_finish_order(self, new_store, run_dialog):
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
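        # Build a confirmed order whose single item was fully received.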
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.get_items()[0].quantity_received = 2
purchase.status = PurchaseOrder.ORDER_CONFIRMED
purchase.received_quantity = 2
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Finish)
run_dialog.assert_called_once_with(PurchaseFinishWizard,
self.store, purchase)
@mock.patch('stoq.gui.purchase.yesno')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_cancel_order(self, new_store, yesno):
new_store.return_value = self.store
yesno.return_value = True
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Cancel)
yesno.assert_called_once_with(u'The selected order will be '
u'cancelled.', gtk.RESPONSE_YES,
u"Cancel order", u"Don't cancel")
self.assertEquals(purchase.status, PurchaseOrder.ORDER_CANCELLED)
@mock.patch('stoqlib.gui.wizards.productwizard.run_dialog')
@mock.patch('stoqlib.gui.wizards.productwizard.api.new_store')
def test_new_product(self, new_store, run_dialog):
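        # Have the mocked product wizard report that nothing was created.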
run_dialog.return_value = False
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.NewProduct)
run_dialog.assert_called_once_with(ProductCreateWizard,
app, self.store)
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_new_consignment(self, run_dialog):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
purchase = self.create_purchase_order()
app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
olist = app.results
olist.select(olist[0])
with mock.patch('stoq.gui.purchase.api', new=self.fake.api):
self.fake.set_retval(purchase)
self.activate(app.NewConsignment)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
wizard, store = args
self.assertEquals(wizard, ConsignmentWizard)
self.assertTrue(store is not None)
self.assertEquals(kwargs[u'model'], None)<|fim▁end|>
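
All of the tests above follow one recipe. The hypothetical template below condenses it; SomeAction is a placeholder, not a real Stoq action (substitute Edit, Confirm, Cancel and so on), and the decorators mirror the ones used throughout this file:

    @mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
    @mock.patch('stoq.gui.purchase.api.new_store')
    def test_template(self, new_store, run_dialog):
        new_store.return_value = self.store          # share the test transaction
        app = self.create_app(PurchaseApp, u'purchase')
        olist = app.results
        olist.select(olist[0])                       # actions require a selection
        with mock.patch.object(self.store, 'close'):
            with mock.patch.object(self.store, 'commit'):
                self.activate(app.SomeAction)        # trigger the GUI action
        self.assertEquals(run_dialog.call_count, 1)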
<|file_name|>16.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>
<|fim▁end|>
export { ThunderstormStrong16 as default } from "../../";