repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
rschnapka/bank-payment
|
account_direct_debit/model/account_invoice.py
|
1
|
5845
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 - 2013 Therp BV (<http://therp.nl>).
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
This module adds support for Direct debit orders as applicable
in the Netherlands. Debit orders are advanced in total by the bank.
Amounts that cannot be debited or are canceled by account owners are
credited afterwards. Such a credit entry is called a storno.
Invoice workflow:
1 the sale leads to
1300 Debtors 100
8000 Sales 100
Balance:
Debtors 2000 |
Sales | 2000
2 an external booking takes place
1100 Bank 100
1300 Debtors 100
This booking is reconciled with [1]
The invoice gets set to state 'paid', and 'reconciled' = True
Balance:
Debtors 1900 |
Bank 100 |
Sales | 2000
This module implements the following diversion:
2a the invoice is included in a direct debit order. When the order is
confirmed, a move is created per invoice:
2000 Transfer account 100 |
1300 Debtors | 100
Reconciliation takes place between 1 and 2a.
The invoice gets set to state 'paid', and 'reconciled' = True
Balance:
Debtors 0 |
Transfer account 2000 |
Bank 0 |
Sales | 2000
3a the direct debit order is booked on the bank account
Balance:
1100 Bank 2000 |
2000 Transfer account | 2000
Reconciliation takes place between 3a and 2a
Balance:
Debtors 0 |
Transfer account 0 |
Bank 2000 |
Sales | 2000
4 a storno from invoice [1] triggers a new booking on the bank account
1300 Debtors 100 |
1100 Bank | 100
Balance:
Debtors 100 |
Transfer account 0 |
Bank 1900 |
Sales | 2000
The reconciliation of 2a is undone. The booking of 2a is reconciled
with the booking of 4 instead.
The payment line attribute 'storno' is set to True and the invoice
state is no longer 'paid'.
Two cases need to be distinguished:
1) If the storno is a manual storno from the partner, the invoice is set to
state 'debit_denied', with 'reconciled' = False
This module implements this option by allowing the bank module to call
netsvc.LocalService("workflow").trg_validate(
uid, 'account.invoice', ids, 'debit_denied', cr)
2) If the storno is an error generated by the bank (presumably non-fatal),
the invoice is reopened for the next debit run.
This is done by calling the existing workflow signal
netsvc.LocalService("workflow").trg_validate(
uid, 'account.invoice', ids, 'open_test', cr)
We would also add a log entry on the invoice for tracing purposes, e.g.
self._log_event(cr, uid, ids, -1.0, 'Debit denied'),
were it not for the comment
"#TODO: implement messages system" in account/invoice.py.
Recurring non-fatal errors need to be dealt with manually by checking
open invoices with a matured invoice date or due date.
"""
from openerp.osv import orm
from openerp.tools.translate import _
class account_invoice(orm.Model):
_inherit = "account.invoice"
def __init__(self, pool, cr):
"""
Adding a state to the hardcoded state list of the inherited
model. The alternative is duplicating the field definition
in columns but only one module can do that!
Maybe apply a similar trick when overriding the buttons' 'states'
attributes in the form view, manipulating the xml in fields_view_get().
"""
super(account_invoice, self).__init__(pool, cr)
invoice_obj = pool.get('account.invoice')
invoice_obj._columns['state'].selection.append(
('debit_denied', 'Debit denied'))
def action_debit_denied(self, cr, uid, ids, context=None):
for invoice_id in ids:
if self.test_paid(cr, uid, [invoice_id], context):
number = self.read(
cr, uid, invoice_id, ['number'], context=context)['number']
raise orm.except_orm(
_('Error !'),
_("You cannot set invoice '%s' to state 'debit denied', "
'as it is still reconciled.') % number)
self.write(cr, uid, ids, {'state': 'debit_denied'}, context=context)
for inv_id, name in self.name_get(cr, uid, ids, context=context):
message = _("Invoice '%s': direct debit is denied.") % name
self.log(cr, uid, inv_id, message)
return True
def test_undo_debit_denied(self, cr, uid, ids, context=None):
"""
Called from the workflow. Used to unset paid state on
invoices that were paid with bank transfers which are being cancelled
"""
for invoice in self.read(cr, uid, ids, ['reconciled'], context):
if not invoice['reconciled']:
return False
return True
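# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Shows how a calling bank/payment module could trigger the two storno
# outcomes described in the module docstring. The helper name and the
# 'manual_storno' flag are hypothetical; only the 'debit_denied' and
# 'open_test' workflow signals come from this module and the core account
# module.
def _example_trigger_storno_workflow(cr, uid, invoice_ids, manual_storno):
    from openerp import netsvc
    wf_service = netsvc.LocalService("workflow")
    # case 1 (manual storno by the partner) -> state 'debit_denied'
    # case 2 (non-fatal bank error) -> reopen the invoice for the next run
    signal = 'debit_denied' if manual_storno else 'open_test'
    for invoice_id in invoice_ids:
        wf_service.trg_validate(uid, 'account.invoice', invoice_id, signal, cr)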
|
agpl-3.0
| -1,404,891,536,912,574,000 | -4,620,233,653,980,138,000 | 34.424242 | 79 | 0.623268 | false |
gautam1858/tensorflow
|
tensorflow/contrib/framework/python/ops/ops_test.py
|
118
|
2882
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import ops as ops_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
def testGetGraphFromEmptyInputs(self):
with ops.Graph().as_default() as g0:
self.assertIs(g0, ops_lib.get_graph_from_inputs([]))
def testGetGraphFromValidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
with ops.Graph().as_default():
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
def testGetGraphFromInvalidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
g1 = ops.Graph()
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
with g1.as_default():
values.append(constant_op.constant(2.0))
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
ops_lib.get_graph_from_inputs(values)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g0)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
def testGetNameScope(self):
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", ops_lib.get_name_scope())
self.assertEqual("scope1/scope2", ops_lib.get_name_scope())
self.assertEqual("scope1", ops_lib.get_name_scope())
self.assertEqual("", ops_lib.get_name_scope())
if __name__ == "__main__":
test.main()
|
apache-2.0
| -3,701,136,153,213,637,000 | -7,211,783,185,933,930,000 | 39.591549 | 80 | 0.691534 | false |
edcast-inc/edx-platform-edcast
|
common/djangoapps/monitoring/signals.py
|
172
|
4584
|
"""
Add receivers for django signals, and feed data into the monitoring system.
If a model has a class attribute 'MODEL_TAGS' that is a list of strings,
those fields will be retrieved from the model instance, and added as tags to
the recorded metrics.
"""
from django.db.models.signals import post_save, post_delete, m2m_changed, post_init
from django.dispatch import receiver
import dogstats_wrapper as dog_stats_api
def _database_tags(action, sender, kwargs):
"""
Return a list of tags for the sender and database used in django.db.models signals.
Arguments:
action (str): What action is being performed on the db model.
sender (Model): What model class is the action being performed on.
kwargs (dict): The kwargs passed by the model signal.
"""
tags = _model_tags(kwargs, 'instance')
tags.append(u'action:{}'.format(action))
if 'using' in kwargs:
tags.append(u'database:{}'.format(kwargs['using']))
return tags
def _model_tags(kwargs, key):
"""
Return a list of all tags for all attributes in kwargs[key].MODEL_TAGS,
plus a tag for the model class.
"""
if key not in kwargs:
return []
instance = kwargs[key]
tags = [
u'{}.{}:{}'.format(key, attr, getattr(instance, attr))
for attr in getattr(instance, 'MODEL_TAGS', [])
]
tags.append(u'model_class:{}'.format(instance.__class__.__name__))
return tags
@receiver(post_init, dispatch_uid='edxapp.monitoring.post_init_metrics')
def post_init_metrics(sender, **kwargs):
"""
Record the number of times that django models are instantiated.
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this initialization (optional).
instance (Model instance): The instance being initialized (optional).
"""
tags = _database_tags('initialized', sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(post_save, dispatch_uid='edxapp.monitoring.post_save_metrics')
def post_save_metrics(sender, **kwargs):
"""
Record the number of times that django models are saved (created or updated).
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this update (optional).
instance (Model instance): The instance being updated (optional).
"""
action = 'created' if kwargs.pop('created', False) else 'updated'
tags = _database_tags(action, sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(post_delete, dispatch_uid='edxapp.monitoring.post_delete_metrics')
def post_delete_metrics(sender, **kwargs):
"""
Record the number of times that django models are deleted.
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this deletion (optional).
instance (Model instance): The instance being deleted (optional).
"""
tags = _database_tags('deleted', sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(m2m_changed, dispatch_uid='edxapp.monitoring.m2m_changed_metrics')
def m2m_changed_metrics(sender, **kwargs):
"""
Record the number of times that Many2Many fields are updated. This is separated
from post_save and post_delete, because it's signaled by the database model in
the middle of the Many2Many relationship, rather than either of the models
that are the relationship participants.
Args:
sender (Model): The model class in the middle of the Many2Many relationship.
action (str): The action being taken on this Many2Many relationship.
using (str): The name of the database being used for this deletion (optional).
instance (Model instance): The instance whose many-to-many relation is being modified.
model (Model class): The model of the class being added/removed/cleared from the relation.
"""
if 'action' not in kwargs:
return
action = {
'post_add': 'm2m.added',
'post_remove': 'm2m.removed',
'post_clear': 'm2m.cleared',
}.get(kwargs['action'])
if not action:
return
tags = _database_tags(action, sender, kwargs)
if 'model' in kwargs:
tags.append('target_class:{}'.format(kwargs['model'].__name__))
pk_set = kwargs.get('pk_set', []) or []
dog_stats_api.increment(
'edxapp.db.model',
value=len(pk_set),
tags=tags
)
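# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Demonstrates the MODEL_TAGS hook described in the module docstring. The
# class below is a plain stand-in object with hypothetical attribute values,
# not a real Django model, so importing it stays side-effect free.
class _ExampleTaggedInstance(object):
    MODEL_TAGS = ['course_id', 'mode']
    course_id = 'example-course-id'
    mode = 'honor'
def _example_model_tags():
    # Produces one 'instance.<attr>:<value>' tag per attribute listed in
    # MODEL_TAGS, plus a 'model_class:...' tag:
    # [u'instance.course_id:example-course-id',
    #  u'instance.mode:honor',
    #  u'model_class:_ExampleTaggedInstance']
    return _model_tags({'instance': _ExampleTaggedInstance()}, 'instance')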
|
agpl-3.0
| 7,455,059,071,995,350,000 | -6,098,185,265,093,177,000 | 32.955556 | 98 | 0.669503 | false |
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GL/INTEL/map_texture.py
|
9
|
1716
|
'''OpenGL extension INTEL.map_texture
This module customises the behaviour of the
OpenGL.raw.GL.INTEL.map_texture to provide a more
Python-friendly API
Overview (from the spec)
Systems with integrated GPUs can share the same physical memory between CPU
and GPU. This feature, if exposed by API, can bring significant performance
benefits for graphics applications by reducing the complexity of
uploading/accessing texture contents. This extension enables CPU direct
access to the GPU memory holding textures.
The problem with texture memory directly exposed to clients is that
textures are often 'tiled'. Texels are kept in specific layout to improve
locality of reference and thus performance of texturing. This 'tiling'
is specific to particular hardware and would be thus difficult to use.
This extension allows to create textures with 'linear' layout which allows
for simplified access on user side (potentially sacrificing some
performance during texture sampling).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/INTEL/map_texture.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.INTEL.map_texture import *
from OpenGL.raw.GL.INTEL.map_texture import _EXTENSION_NAME
def glInitMapTextureINTEL():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glMapTexture2DINTEL=wrapper.wrapper(glMapTexture2DINTEL).setInputArraySize(
'stride', 1
).setInputArraySize(
'layout', 1
)
### END AUTOGENERATED SECTION
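# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Typical guard before relying on the extension: confirm availability with
# the helper defined above, inside a current GL context. Context creation is
# assumed to be handled elsewhere by the caller.
def _example_require_map_texture():
    if not glInitMapTextureINTEL():
        raise RuntimeError('INTEL_map_texture is not available on this context')
    # From here on it is safe to create linear-layout textures and map them
    # with glMapTexture2DINTEL / glUnmapTexture2DINTEL.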
|
gpl-3.0
| -5,051,042,947,728,171,000 | -8,780,543,930,287,341,000 | 38.930233 | 76 | 0.798368 | false |
analyseuc3m/ANALYSE-v1
|
lms/djangoapps/mobile_api/course_info/views.py
|
3
|
3753
|
"""
Views for course info API
"""
from django.http import Http404
from rest_framework import generics
from rest_framework.response import Response
from courseware.courses import get_course_info_section_module
from static_replace import make_static_urls_absolute
from openedx.core.lib.xblock_utils import get_course_update_items
from ..utils import mobile_view, mobile_course_access
@mobile_view()
class CourseUpdatesList(generics.ListAPIView):
"""
**Use Case**
Get the content for course updates.
**Example Request**
GET /api/mobile/v0.5/course_info/{course_id}/updates
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with an array of course updates. Each course update
contains the following values.
* content: The content, as an HTML string, of the course update.
* date: The date of the course update.
* id: The unique identifier of the update.
* status: Whether the update is visible or not.
"""
@mobile_course_access()
def list(self, request, course, *args, **kwargs):
course_updates_module = get_course_info_section_module(request, request.user, course, 'updates')
update_items = get_course_update_items(course_updates_module)
updates_to_show = [
update for update in update_items
if update.get("status") != "deleted"
]
for item in updates_to_show:
item['content'] = apply_wrappers_to_content(item['content'], course_updates_module, request)
return Response(updates_to_show)
@mobile_view()
class CourseHandoutsList(generics.ListAPIView):
"""
**Use Case**
Get the HTML for course handouts.
**Example Request**
GET /api/mobile/v0.5/course_info/{course_id}/handouts
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with the following value.
* handouts_html: The HTML for course handouts.
"""
@mobile_course_access()
def list(self, request, course, *args, **kwargs):
course_handouts_module = get_course_info_section_module(request, request.user, course, 'handouts')
if course_handouts_module:
if course_handouts_module.data == "<ol></ol>":
handouts_html = None
else:
handouts_html = apply_wrappers_to_content(course_handouts_module.data, course_handouts_module, request)
return Response({'handouts_html': handouts_html})
else:
# course_handouts_module could be None if there are no handouts
return Response({'handouts_html': None})
def apply_wrappers_to_content(content, module, request):
"""
Updates a piece of html content with the filter functions stored in its module system, then replaces any
static urls with absolute urls.
Args:
content: The html content to which to apply the content wrappers generated for this module system.
module: The module containing a reference to the module system which contains functions to apply to the
content. These functions include:
* Replacing static url's
* Replacing course url's
* Replacing jump to id url's
request: The request, used to replace static URLs with absolute URLs.
Returns: A piece of html content containing the original content updated by each wrapper.
"""
content = module.system.replace_urls(content)
content = module.system.replace_course_urls(content)
content = module.system.replace_jump_to_id_urls(content)
return make_static_urls_absolute(request, content)
|
agpl-3.0
| 4,895,020,521,575,867,000 | -1,638,354,710,322,828,800 | 33.75 | 119 | 0.667999 | false |
pankajnits/vyked
|
vyked/registry_client.py
|
2
|
6600
|
import asyncio
import logging
import random
from collections import defaultdict
from again.utils import unique_hex
from functools import partial
from retrial.retrial import retry
from .packet import ControlPacket
from .protocol_factory import get_vyked_protocol
from .pinger import TCPPinger
def _retry_for_result(result):
if isinstance(result, tuple):
return not isinstance(result[0], asyncio.transports.Transport) or not isinstance(result[1], asyncio.Protocol)
return True
def _retry_for_exception(_):
return True
class RegistryClient:
logger = logging.getLogger(__name__)
def __init__(self, loop, host, port):
self._loop = loop
self._port = port
self._host = host
self.bus = None
self._service_host = None
self._service_port = None
self._transport = None
self._protocol = None
self._service = None
self._version = None
self._node_id = None
self._pinger = None
self._pending_requests = {}
self._available_services = defaultdict(list)
self._assigned_services = defaultdict(lambda: defaultdict(list))
def register(self, ip, port, service, version, vendors, service_type):
self._service_host = ip
self._service_port = port
self._service = service
self._version = version
self._node_id = '{}_{}_{}'.format(service, version, unique_hex())
packet = ControlPacket.registration(ip, port, self._node_id, service, version, vendors, service_type)
self._protocol.send(packet)
def get_instances(self, service, version):
packet = ControlPacket.get_instances(service, version)
future = asyncio.Future()
self._protocol.send(packet)
self._pending_requests[packet['request_id']] = future
return future
def get_subscribers(self, service, version, endpoint):
packet = ControlPacket.get_subscribers(service, version, endpoint)
# TODO : remove duplication in get_instances and get_subscribers
future = asyncio.Future()
self._protocol.send(packet)
self._pending_requests[packet['request_id']] = future
return future
def x_subscribe(self, endpoints):
packet = ControlPacket.xsubscribe(self._service, self._version, self._service_host, self._service_port,
self._node_id,
endpoints)
self._protocol.send(packet)
@retry(should_retry_for_result=_retry_for_result, should_retry_for_exception=_retry_for_exception,
strategy=[0, 2, 4, 8, 16, 32])
def connect(self):
self._transport, self._protocol = yield from self._loop.create_connection(partial(get_vyked_protocol, self),
self._host, self._port)
self._pinger = TCPPinger('registry', self._protocol, self)
self._pinger.ping()
return self._transport, self._protocol
def on_timeout(self, node_id):
asyncio.async(self.connect())
def receive(self, packet: dict, protocol, transport):
if packet['type'] == 'registered':
self.cache_vendors(packet['params']['vendors'])
self.bus.registration_complete()
elif packet['type'] == 'deregister':
self._handle_deregistration(packet)
elif packet['type'] == 'subscribers':
self._handle_subscriber_packet(packet)
elif packet['type'] == 'pong':
self._pinger.pong_received()
def get_all_addresses(self, full_service_name):
return self._available_services.get(
self._get_full_service_name(full_service_name[0], full_service_name[1]))
def get_for_node(self, node_id):
for services in self._available_services.values():
for host, port, node, service_type in services:
if node == node_id:
return host, port, node, service_type
return None
def get_random_service(self, service_name, service_type):
services = self._available_services[service_name]
services = [service for service in services if service[3] == service_type]
if len(services):
return random.choice(services)
else:
return None
def resolve(self, service: str, version: str, entity: str, service_type: str):
service_name = self._get_full_service_name(service, version)
if entity is not None:
entity_map = self._assigned_services.get(service_name)
if entity_map is None:
self._assigned_services[service_name] = {}
entity_map = self._assigned_services.get(service_name)
if entity in entity_map:
return entity_map[entity]
else:
host, port, node_id, service_type = self.get_random_service(service_name, service_type)
if node_id is not None:
entity_map[entity] = host, port, node_id, service_type
return host, port, node_id, service_type
else:
return self.get_random_service(service_name, service_type)
@staticmethod
def _get_full_service_name(service, version):
return "{}/{}".format(service, version)
def cache_vendors(self, vendors):
for vendor in vendors:
vendor_name = self._get_full_service_name(vendor['name'], vendor['version'])
for address in vendor['addresses']:
self._available_services[vendor_name].append(
(address['host'], address['port'], address['node_id'], address['type']))
def _handle_deregistration(self, packet):
params = packet['params']
vendor = self._get_full_service_name(params['service'], params['version'])
node = params['node_id']
for each in self._available_services[vendor]:
if each[2] == node:
self._available_services[vendor].remove(each)
entity_map = self._assigned_services.get(vendor)
if entity_map is not None:
stale_entities = []
for entity, node_id in entity_map.items():
if node == node_id:
stale_entities.append(entity)
for entity in stale_entities:
entity_map.pop(entity)
def _handle_subscriber_packet(self, packet):
request_id = packet['request_id']
future = self._pending_requests[request_id]
future.set_result(packet['params']['subscribers'])
|
mit
| 532,072,413,673,519,550 | -2,818,068,171,046,381,000 | 39.490798 | 117 | 0.604697 | false |
liang42hao/bokeh
|
bokeh/transforms/image_downsample.py
|
43
|
2158
|
from __future__ import absolute_import
import numpy as np
from ..models import ServerDataSource
try:
import scipy
import scipy.misc
except ImportError as e:
print(e)
def source(**kwargs):
kwargs['transform'] = {'resample':'heatmap',
'global_x_range' : [0, 10],
'global_y_range' : [0, 10],
'global_offset_x' : [0],
'global_offset_y' : [0],
'type' : 'ndarray',
}
kwargs['data'] = {'x': [0],
'y': [0],
'dw' : [10],
'dh' : [10],
}
return ServerDataSource(**kwargs)
def downsample(image, image_x_axis, image_y_axis,
x_bounds, y_bounds, x_resolution, y_resolution):
x_resolution, y_resolution = int(round(x_resolution)), int(round(y_resolution))
x_bounds = [x_bounds.start, x_bounds.end]
y_bounds = [y_bounds.start, y_bounds.end]
x_bounds = np.searchsorted(image_x_axis, x_bounds)
y_bounds = np.searchsorted(image_y_axis, y_bounds)
#y_bounds = image.shape[0] + 1 - y_bounds[::-1]
if x_resolution == 0 or y_resolution == 0:
subset = np.zeros((1,1), dtype=image.dtype)
else:
subset = image[y_bounds[0]:y_bounds[1],
x_bounds[0]:x_bounds[1]]
x_downsample_factor = max(round(subset.shape[1] / x_resolution / 3.), 1)
y_downsample_factor = max(round(subset.shape[0] / y_resolution / 3.), 1)
subset = subset[::x_downsample_factor, ::y_downsample_factor]
subset = scipy.misc.imresize(subset, (x_resolution, y_resolution),
interp='nearest')
bounds = image_x_axis[x_bounds[0]:x_bounds[1]]
dw = np.max(bounds) - np.min(bounds)
bounds = image_y_axis[y_bounds[0]:y_bounds[1]]
dh = np.max(bounds) - np.min(bounds)
return {'data' : {'image': [subset],
'x': [image_x_axis[x_bounds[0]]],
'y': [image_y_axis[y_bounds[0]]],
'dw': [dw],
'dh': [dh],
}
}
|
bsd-3-clause
| -2,661,644,225,466,260,500 | -5,231,854,077,503,933,000 | 34.377049 | 83 | 0.50278 | false |
serverdensity/sd-agent-core-plugins
|
kafka/test_kafka.py
|
1
|
4269
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import threading
import time
from types import ListType
import unittest
import os
import mock
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
import logging
log = logging.getLogger('kafka_test')
STATSD_PORT = 8121
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/tmp/collector.log',
'forwarder_log_file': '/tmp/forwarder.log',
'dogstatsd_log_file': '/tmp/dogstatsd.log',
'jmxfetch_log_file': '/tmp/sd-agent/jmxfetch.log',
'go-metro_log_file': '/tmp/sd-agent/go-metro.log',
}
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from jmxfetch import JMXFetch
from dogstatsd import Server
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='kafka')
class TestKafka(unittest.TestCase):
"""Basic Test for kafka integration."""
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__), 'ci/resources/')
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
# expected_tags = ['env:test', 'instance:kafka-172.17.0.1-9999', 'kafka:broker']
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
log.info(metrics)
log.info(len(metrics))
self.assertTrue(
len([t for t in metrics if "jvm." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) >= 13, metrics)
self.assertTrue(
len([t for t in metrics if "kafka.request." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) == 12, metrics)
self.assertTrue(
len([t for t in metrics if "kafka.replication." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) == 6, metrics)
# CLIENT metrics.
# kafka.producer.request_latency_avg
self.assertTrue(
len([t for t in metrics if "kafka.producer." in t['metric'] and "instance:kafka-172.17.0.1-7777" in t['tags']]) == 1, metrics)
# kafka.consumer.fetch_rate, kafka.consumer.max_lag
self.assertTrue(
len([t for t in metrics if "kafka.consumer." in t['metric'] and "instance:kafka-172.17.0.1-7777" in t['tags']]) == 2, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.follower." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.net." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.messages_in" in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
|
bsd-3-clause
| 5,811,221,748,718,467,000 | 7,790,237,846,700,815,000 | 33.991803 | 142 | 0.622628 | false |
Tong-Chen/scikit-learn
|
sklearn/tests/test_cross_validation.py
|
4
|
30858
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import unique
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import make_scorer
from sklearn.externals import six
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
class MockListClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation.
Checks that GridSearchCV didn't convert X to array.
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
assert_true(isinstance(X, list))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0):
self.a = a
def fit(self, X, Y=None, sample_weight=None, class_prior=None):
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
y = [3, 3, -1, -1, 2]
cv = cval.StratifiedKFold(y, 3)
# checking there was only one warning.
assert_equal(len(w), 1)
# checking it has the right type
assert_equal(w[0].category, Warning)
# checking it's the right warning. This might be a bad test since it's
# a characteristic of the code and not a behavior
assert_true("The least populated class" in str(w[0]))
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for train, test in cval.StratifiedKFold(labels, 5):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
labels = [0] * 3 + [1] * 14
for skf in [cval.StratifiedKFold(labels[:i], 3) for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model w.r.t. the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(unique(y[train]), unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(unique(y[train], return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(unique(y[test], return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
@ignore_warnings
def test_stratified_shuffle_split_iter_no_indices():
y = np.asarray([0, 1, 2] * 10)
sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)
train_mask, test_mask = next(iter(sss1))
sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)
train_indices, test_indices = next(iter(sss2))
assert_array_equal(sorted(test_indices), np.where(test_mask)[0])
def test_leave_label_out_changing_labels():
"""Check that LeaveOneLabelOut and LeavePLabelOut work normally if
the labels variable is changed before calling __iter__"""
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X as list
clf = MockListClassifier()
scores = cval.cross_val_score(clf, X.tolist(), y)
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
score = cval.cross_val_score(clf, X, y, score_func=score_func)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = range(10)
split = cval.train_test_split(X, X_s, y)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# also test deprecated old way
with warnings.catch_warnings(record=True):
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
score_func=f1_score, cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
with warnings.catch_warnings(record=True):
ev_scores = cval.cross_val_score(reg, X, y, cv=5,
score_func=explained_variance_score)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy", labels=np.ones(y.size),
random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
scorer = make_scorer(fbeta_score, beta=2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, scoring=scorer, cv=cv, labels=np.ones(y.size),
random_state=0)
assert_almost_equal(score_label, .97, 2)
assert_almost_equal(pvalue_label, 0.01, 3)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(svm, X, y, cv=cv,
scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
# test with deprecated interface
with warnings.catch_warnings(record=True):
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, score_func=accuracy_score, cv=cv)
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_mask():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=False)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=False)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=False)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=False)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=False)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=False)
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
4, indices=False)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
for train, test in cv:
assert_equal(np.asarray(train).dtype.kind, 'b')
assert_equal(np.asarray(train).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=True)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=True)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=True)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=True)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=True)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=True)
b = cval.Bootstrap(2) # only in index mode
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
2, indices=True)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
b = cval.Bootstrap(2) # only in index mode
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
@ignore_warnings
def test_cross_val_generator_mask_indices_same():
# Test that the cross validation generators return the same results when
# indices=True and when indices=False
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
labels = np.array([1, 1, 2, 3, 3, 3, 4])
loo_mask = cval.LeaveOneOut(5, indices=False)
loo_ind = cval.LeaveOneOut(5, indices=True)
lpo_mask = cval.LeavePOut(10, 2, indices=False)
lpo_ind = cval.LeavePOut(10, 2, indices=True)
kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)
kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)
skf_mask = cval.StratifiedKFold(y, 3, indices=False)
skf_ind = cval.StratifiedKFold(y, 3, indices=True)
lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)
lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)
lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)
lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)
for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),
(kf_mask, kf_ind), (skf_mask, skf_ind),
(lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:
for (train_mask, test_mask), (train_ind, test_ind) in \
zip(cv_mask, cv_ind):
assert_array_equal(np.where(train_mask)[0], train_ind)
assert_array_equal(np.where(test_mask)[0], test_ind)
def test_bootstrap_errors():
assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)
def test_bootstrap_test_sizes():
assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=None).test_size, 5)
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
@ignore_warnings
def test_cross_indices_exception():
X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
assert_raises(ValueError, cval.check_cv, loo, X, y)
assert_raises(ValueError, cval.check_cv, lpo, X, y)
assert_raises(ValueError, cval.check_cv, kf, X, y)
assert_raises(ValueError, cval.check_cv, skf, X, y)
assert_raises(ValueError, cval.check_cv, lolo, X, y)
assert_raises(ValueError, cval.check_cv, lopo, X, y)
|
bsd-3-clause
| -8,752,534,682,886,172,000 | 7,527,967,188,647,646,000 | 38.359694 | 79 | 0.629108 | false |
0-wiz-0/bgfx
|
3rdparty/scintilla/scripts/GenerateCaseConvert.py
|
71
|
4580
|
# Script to generate CaseConvert.cxx from Python's Unicode data
# Should be run rarely when a Python with a new version of Unicode data is available.
# Requires Python 3.3 or later
# Should not be run with old versions of Python.
# Current best approach divides case conversions into two cases:
# simple symmetric and complex.
# Simple symmetric is where a lower and upper case pair convert to each
# other and the folded form is the same as the lower case.
# There are 1006 symmetric pairs.
# These are further divided into ranges (stored as lower, upper, range length,
# range pitch) and singletons (stored as lower, upper).
# Complex is for cases that don't fit the above: where there are multiple
# characters in one of the forms or fold is different to lower or
# lower(upper(x)) or upper(lower(x)) are not x. These are represented as UTF-8
# strings with original, folded, upper, and lower separated by '|'.
# There are 126 complex cases.
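# A concrete illustration (added for clarity; not used by the script itself):
# 'a'/'A' form a simple symmetric pair, since 'a'.upper() == 'A',
# 'A'.lower() == 'a' and 'a'.casefold() == 'a'.  U+00DF ('ß') is a complex
# case, since 'ß'.upper() == 'SS' has two characters while 'ß'.casefold() is
# 'ss', so it is emitted as one of the '|'-separated UTF-8 strings instead.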
import codecs, itertools, os, string, sys, unicodedata
from FileGenerator import Regenerate
def contiguousRanges(l, diff):
    # l is a list of lists
# group into lists where first element of each element differs by diff
out = [[l[0]]]
for s in l[1:]:
if s[0] != out[-1][-1][0] + diff:
out.append([])
out[-1].append(s)
return out
def flatten(listOfLists):
"Flatten one level of nesting"
return itertools.chain.from_iterable(listOfLists)
def conversionSets():
# For all Unicode characters, see whether they have case conversions
# Return 2 sets: one of simple symmetric conversion cases and another
# with complex cases.
complexes = []
symmetrics = []
for ch in range(sys.maxunicode):
if ch >= 0xd800 and ch <= 0xDBFF:
continue
if ch >= 0xdc00 and ch <= 0xDFFF:
continue
uch = chr(ch)
fold = uch.casefold()
upper = uch.upper()
lower = uch.lower()
symmetric = False
if uch != upper and len(upper) == 1 and uch == lower and uch == fold:
lowerUpper = upper.lower()
foldUpper = upper.casefold()
if lowerUpper == foldUpper and lowerUpper == uch:
symmetric = True
symmetrics.append((ch, ord(upper), ch - ord(upper)))
if uch != lower and len(lower) == 1 and uch == upper and lower == fold:
upperLower = lower.upper()
if upperLower == uch:
symmetric = True
if fold == uch:
fold = ""
if upper == uch:
upper = ""
if lower == uch:
lower = ""
if (fold or upper or lower) and not symmetric:
complexes.append((uch, fold, upper, lower))
return symmetrics, complexes
def groupRanges(symmetrics):
# Group the symmetrics into groups where possible, returning a list
# of ranges and a list of symmetrics that didn't fit into a range
def distance(s):
return s[2]
groups = []
uniquekeys = []
for k, g in itertools.groupby(symmetrics, distance):
groups.append(list(g)) # Store group iterator as a list
uniquekeys.append(k)
contiguousGroups = flatten([contiguousRanges(g, 1) for g in groups])
longGroups = [(x[0][0], x[0][1], len(x), 1) for x in contiguousGroups if len(x) > 4]
oneDiffs = [s for s in symmetrics if s[2] == 1]
contiguousOnes = flatten([contiguousRanges(g, 2) for g in [oneDiffs]])
longOneGroups = [(x[0][0], x[0][1], len(x), 2) for x in contiguousOnes if len(x) > 4]
rangeGroups = sorted(longGroups+longOneGroups, key=lambda s: s[0])
rangeCoverage = list(flatten([range(r[0], r[0]+r[2]*r[3], r[3]) for r in rangeGroups]))
nonRanges = [(l, u) for l, u, d in symmetrics if l not in rangeCoverage]
return rangeGroups, nonRanges
def escape(s):
return "".join((chr(c) if chr(c) in string.ascii_letters else "\\x%x" % c) for c in s.encode('utf-8'))
def updateCaseConvert():
symmetrics, complexes = conversionSets()
rangeGroups, nonRanges = groupRanges(symmetrics)
print(len(rangeGroups), "ranges")
rangeLines = ["%d,%d,%d,%d, " % x for x in rangeGroups]
print(len(nonRanges), "non ranges")
nonRangeLines = ["%d,%d, " % x for x in nonRanges]
print(len(symmetrics), "symmetric")
complexLines = ['"%s|%s|%s|%s|"' % tuple(escape(t) for t in x) for x in complexes]
print(len(complexLines), "complex")
Regenerate("../src/CaseConvert.cxx", "//", rangeLines, nonRangeLines, complexLines)
updateCaseConvert()
|
bsd-2-clause
| -567,048,793,171,492,100 | 7,775,385,741,678,596,000 | 35.349206 | 103 | 0.632751 | false |
cybertk/mffr
|
chromium_tools/mffr.py
|
1
|
6105
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: mffr.py [-d] [-g *.h] [-g *.cc] REGEXP REPLACEMENT
This tool performs a fast find-and-replace operation on files in
the current git repository.
The -d flag selects a default set of globs (C++ and Objective-C/C++
source files). The -g flag adds a single glob to the list and may
be used multiple times. If neither -d nor -g is specified, the tool
searches all files (*.*).
REGEXP uses full Python regexp syntax. REPLACEMENT can use
back-references.
"""
__version__ = '1.0.0'
import optparse
import re
import subprocess
import sys
# We need to use shell=True with subprocess on Windows so that it
# finds 'git' from the path, but can lead to undesired behavior on
# Linux.
_USE_SHELL = (sys.platform == 'win32')
def MultiFileFindReplace(original, replacement, file_globs):
"""Implements fast multi-file find and replace.
Given an |original| string and a |replacement| string, find matching
files by running git grep on |original| in files matching any
pattern in |file_globs|.
Once files are found, |re.sub| is run to replace |original| with
|replacement|. |replacement| may use capture group back-references.
Args:
original: '(#(include|import)\s*["<])chrome/browser/ui/browser.h([>"])'
replacement: '\1chrome/browser/ui/browser/browser.h\3'
file_globs: ['*.cc', '*.h', '*.m', '*.mm']
Returns the list of files modified.
Raises an exception on error.
"""
# Posix extended regular expressions do not reliably support the "\s"
# shorthand.
posix_ere_original = re.sub(r"\\s", "[[:space:]]", original)
if sys.platform == 'win32':
posix_ere_original = posix_ere_original.replace('"', '""')
out, err = subprocess.Popen(
['git', 'grep', '-E', '--name-only', posix_ere_original,
'--'] + file_globs,
stdout=subprocess.PIPE,
shell=_USE_SHELL).communicate()
referees = out.splitlines()
for referee in referees:
with open(referee) as f:
original_contents = f.read()
contents = re.sub(original, replacement, original_contents)
if contents == original_contents:
raise Exception('No change in file %s although matched in grep' %
referee)
with open(referee, 'wb') as f:
f.write(contents)
return referees
def main():
parser = optparse.OptionParser(usage='''
(1) %prog <options> REGEXP REPLACEMENT
REGEXP uses full Python regexp syntax. REPLACEMENT can use back-references.
(2) %prog <options> -i <file>
<file> should contain a list (in Python syntax) of
[REGEXP, REPLACEMENT, [GLOBS]] lists, e.g.:
[
[r"(foo|bar)", r"\1baz", ["*.cc", "*.h"]],
["54", "42"],
]
As shown above, [GLOBS] can be omitted for a given search-replace list, in which
case the corresponding search-replace will use the globs specified on the
command line.''',
version='%prog ' + __version__)
parser.add_option('-d', action='store_true',
dest='use_default_glob',
help='Perform the change on C++ and Objective-C(++) source '
'and header files.')
parser.add_option('-f', action='store_true',
dest='force_unsafe_run',
help='Perform the run even if there are uncommitted local '
'changes.')
parser.add_option('-g', action='append',
type='string',
default=[],
metavar="<glob>",
dest='user_supplied_globs',
help='Perform the change on the specified glob. Can be '
'specified multiple times, in which case the globs are '
'unioned.')
parser.add_option('-i', "--input_file",
type='string',
action='store',
default='',
metavar="<file>",
dest='input_filename',
help='Read arguments from <file> rather than the command '
'line. NOTE: To be sure of regular expressions being '
'interpreted correctly, use raw strings.')
opts, args = parser.parse_args()
if opts.use_default_glob and opts.user_supplied_globs:
print '"-d" and "-g" cannot be used together'
parser.print_help()
return 1
from_file = opts.input_filename != ""
if (from_file and len(args) != 0) or (not from_file and len(args) != 2):
parser.print_help()
return 1
if not opts.force_unsafe_run:
out, err = subprocess.Popen(['git', 'status', '--porcelain'],
stdout=subprocess.PIPE,
shell=_USE_SHELL).communicate()
if out:
print 'ERROR: This tool does not print any confirmation prompts,'
print 'so you should only run it with a clean staging area and cache'
print 'so that reverting a bad find/replace is as easy as running'
print ' git checkout -- .'
print ''
print 'To override this safeguard, pass the -f flag.'
return 1
global_file_globs = ['*.*']
if opts.use_default_glob:
global_file_globs = ['*.cc', '*.h', '*.m', '*.mm']
elif opts.user_supplied_globs:
global_file_globs = opts.user_supplied_globs
# Construct list of search-replace tasks.
search_replace_tasks = []
if opts.input_filename == '':
original = args[0]
replacement = args[1]
search_replace_tasks.append([original, replacement, global_file_globs])
else:
f = open(opts.input_filename)
search_replace_tasks = eval("".join(f.readlines()))
for task in search_replace_tasks:
if len(task) == 2:
task.append(global_file_globs)
f.close()
for (original, replacement, file_globs) in search_replace_tasks:
print 'File globs: %s' % file_globs
print 'Original: %s' % original
print 'Replacement: %s' % replacement
MultiFileFindReplace(original, replacement, file_globs)
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
| 2,092,442,104,582,982,000 | -5,134,519,142,441,563,000 | 34.494186 | 80 | 0.618509 | false |
gmt/portage
|
pym/portage/tests/sync/test_sync_local.py
|
2
|
7630
|
# Copyright 2014-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
import sys
import textwrap
import time
import portage
from portage import os, shutil, _shell_quote
from portage import _unicode_decode
from portage.const import PORTAGE_PYM_PATH, TIMESTAMP_FORMAT
from portage.process import find_binary
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
from portage.util import ensure_dirs
class SyncLocalTestCase(TestCase):
"""
Test sync with rsync and git, using file:// sync-uri.
"""
def _must_skip(self):
if find_binary("rsync") is None:
return "rsync: command not found"
if find_binary("git") is None:
return "git: command not found"
def testSyncLocal(self):
debug = False
skip_reason = self._must_skip()
if skip_reason:
self.portage_skip = skip_reason
self.assertFalse(True, skip_reason)
return
repos_conf = textwrap.dedent("""
[DEFAULT]
%(default_keys)s
[test_repo]
location = %(EPREFIX)s/var/repositories/test_repo
sync-type = %(sync-type)s
sync-uri = file:/%(EPREFIX)s/var/repositories/test_repo_sync
auto-sync = yes
%(repo_extra_keys)s
""")
profile = {
"eapi": ("5",),
"package.use.stable.mask": ("dev-libs/A flag",)
}
ebuilds = {
"dev-libs/A-0": {}
}
user_config = {
'make.conf': ('FEATURES="metadata-transfer"',)
}
playground = ResolverPlayground(ebuilds=ebuilds,
profile=profile, user_config=user_config, debug=debug)
settings = playground.settings
eprefix = settings["EPREFIX"]
eroot = settings["EROOT"]
homedir = os.path.join(eroot, "home")
distdir = os.path.join(eprefix, "distdir")
repo = settings.repositories["test_repo"]
metadata_dir = os.path.join(repo.location, "metadata")
cmds = {}
for cmd in ("emerge", "emaint"):
cmds[cmd] = (portage._python_interpreter,
"-b", "-Wd", os.path.join(self.bindir, cmd))
git_binary = find_binary("git")
git_cmd = (git_binary,)
committer_name = "Gentoo Dev"
committer_email = "[email protected]"
def repos_set_conf(sync_type, dflt_keys=None, xtra_keys=None):
env["PORTAGE_REPOSITORIES"] = repos_conf % {\
"EPREFIX": eprefix, "sync-type": sync_type,
"default_keys": "" if dflt_keys is None else dflt_keys,
"repo_extra_keys": "" if xtra_keys is None else xtra_keys}
def alter_ebuild():
with open(os.path.join(repo.location + "_sync",
"dev-libs", "A", "A-0.ebuild"), "a") as f:
f.write("\n")
os.unlink(os.path.join(metadata_dir, 'timestamp.chk'))
sync_cmds = (
(homedir, cmds["emerge"] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(
os.path.join(repo.location, "dev-libs", "A")
), "dev-libs/A expected, but missing")),
(homedir, cmds["emaint"] + ("sync", "-A")),
)
rename_repo = (
(homedir, lambda: os.rename(repo.location,
repo.location + "_sync")),
)
rsync_opts_repos = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync", None,
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back"))),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(
repo.location + "_back"))),
(homedir, lambda: shutil.rmtree(repo.location + "_back")),
(homedir, lambda: repos_set_conf("rsync")),
)
rsync_opts_repos_default = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync",
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location+"_back"))),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
(homedir, lambda: shutil.rmtree(repo.location + "_back")),
(homedir, lambda: repos_set_conf("rsync")),
)
rsync_opts_repos_default_ovr = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync",
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back_nowhere"),
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back"))),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
(homedir, lambda: shutil.rmtree(repo.location + "_back")),
(homedir, lambda: repos_set_conf("rsync")),
)
rsync_opts_repos_default_cancel = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync",
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back_nowhere"),
"sync-rsync-extra-opts = ")),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertFalse(os.path.exists(repo.location + "_back"))),
(homedir, lambda: repos_set_conf("rsync")),
)
delete_sync_repo = (
(homedir, lambda: shutil.rmtree(
repo.location + "_sync")),
)
git_repo_create = (
(repo.location, git_cmd +
("config", "--global", "user.name", committer_name,)),
(repo.location, git_cmd +
("config", "--global", "user.email", committer_email,)),
(repo.location, git_cmd + ("init-db",)),
(repo.location, git_cmd + ("add", ".")),
(repo.location, git_cmd +
("commit", "-a", "-m", "add whole repo")),
)
sync_type_git = (
(homedir, lambda: repos_set_conf("git")),
)
pythonpath = os.environ.get("PYTHONPATH")
if pythonpath is not None and not pythonpath.strip():
pythonpath = None
if pythonpath is not None and \
pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
pass
else:
if pythonpath is None:
pythonpath = ""
else:
pythonpath = ":" + pythonpath
pythonpath = PORTAGE_PYM_PATH + pythonpath
env = {
"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
"DISTDIR" : distdir,
"GENTOO_COMMITTER_NAME" : committer_name,
"GENTOO_COMMITTER_EMAIL" : committer_email,
"HOME" : homedir,
"PATH" : os.environ["PATH"],
"PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
"PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
"PYTHONPATH" : pythonpath,
}
repos_set_conf("rsync")
if os.environ.get("SANDBOX_ON") == "1":
# avoid problems from nested sandbox instances
env["FEATURES"] = "-sandbox -usersandbox"
dirs = [homedir, metadata_dir]
try:
for d in dirs:
ensure_dirs(d)
timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
with open(timestamp_path, 'w') as f:
f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime()))
if debug:
# The subprocess inherits both stdout and stderr, for
# debugging purposes.
stdout = None
else:
# The subprocess inherits stderr so that any warnings
# triggered by python -Wd will be visible.
stdout = subprocess.PIPE
for cwd, cmd in rename_repo + sync_cmds + \
rsync_opts_repos + rsync_opts_repos_default + \
rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \
delete_sync_repo + git_repo_create + sync_type_git + \
rename_repo + sync_cmds:
if hasattr(cmd, '__call__'):
cmd()
continue
abs_cwd = os.path.join(repo.location, cwd)
proc = subprocess.Popen(cmd,
cwd=abs_cwd, env=env, stdout=stdout)
if debug:
proc.wait()
else:
output = proc.stdout.readlines()
proc.wait()
proc.stdout.close()
if proc.returncode != os.EX_OK:
for line in output:
sys.stderr.write(_unicode_decode(line))
self.assertEqual(os.EX_OK, proc.returncode,
"%s failed in %s" % (cmd, cwd,))
finally:
playground.cleanup()
|
gpl-2.0
| 5,081,931,593,469,937,000 | 237,484,821,510,504,450 | 29.158103 | 80 | 0.641153 | false |
alangwansui/mtl_ordercenter
|
openerp/addons/base/ir/osv_memory_autovacuum.py
|
447
|
1450
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
class osv_memory_autovacuum(openerp.osv.osv.osv_memory):
""" Expose the osv_memory.vacuum() method to the cron jobs mechanism. """
_name = 'osv_memory.autovacuum'
def power_on(self, cr, uid, context=None):
for model in self.pool.models.values():
if model.is_transient():
model._transient_vacuum(cr, uid, force=True)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -6,452,133,530,257,276,000 | -4,650,135,804,543,909,000 | 40.428571 | 78 | 0.617931 | false |
gminds/rapidnewsng
|
django/contrib/gis/tests/test_geoforms.py
|
110
|
3800
|
from django.forms import ValidationError
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import HAS_SPATIALREFSYS
from django.utils import unittest
if HAS_SPATIALREFSYS:
from django.contrib.gis import forms
from django.contrib.gis.geos import GEOSGeometry
@unittest.skipUnless(HAS_GDAL and HAS_SPATIALREFSYS, "GeometryFieldTest needs gdal support and a spatial database")
class GeometryFieldTest(unittest.TestCase):
def test00_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
self.assertRaises(ValidationError, fld.clean, bad_default)
def test01_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0000001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry should be transformed to 32140.
cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
def test02_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
self.assertRaises(forms.ValidationError, fld.clean, None)
# Still not allowed if `null=False`.
fld = forms.GeometryField(required=False, null=False)
self.assertRaises(forms.ValidationError, fld.clean, None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertEqual(None, fld.clean(None))
def test03_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'), pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
# but rejected by `clean`
self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')
def test04_to_python(self):
"""
Testing to_python returns a correct GEOSGeometry object or
a ValidationError
"""
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
# but raises a ValidationError for any other string
for wkt in ('POINT(5)', 'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'BLAH(0 0, 1 1)'):
self.assertRaises(forms.ValidationError, fld.to_python, wkt)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GeometryFieldTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__=="__main__":
run()
|
bsd-3-clause
| 5,155,730,435,159,940,000 | -6,694,631,770,282,486,000 | 42.678161 | 115 | 0.657895 | false |
svn2github/django
|
django/contrib/gis/geos/mutable_list.py
|
405
|
10386
|
# Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
    Note that if _get_single_external and _get_single_internal return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
class _IndexError:
        The type of exception to be raised on an invalid index [Optional]
"""
_minlength = 0
_maxlength = None
_IndexError = IndexError
### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super(ListMixin, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, (int, long, slice)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, (int, long)):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = ( self._get_single_internal(i)
for i in xrange(origLen)
if i not in indexRange )
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
def __iter__(self):
"Iterate over the items in the list"
for i in xrange(len(self)):
yield self[i]
### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n-1):
self.extend(cache)
return self
def __cmp__(self, other):
'cmp'
slen = len(self)
for i in range(slen):
try:
c = cmp(self[i], other[i])
except IndexError:
# must be other is shorter
return 1
else:
# elements not equal
if c: return c
return cmp(slen, len(other))
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i: count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val: return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, (int, long)):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=cmp, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v),v) for v in self]
temp.sort(cmp=cmp, key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
temp.sort(cmp=cmp, reverse=reverse)
self[:] = temp
### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in xrange(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in xrange(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
|
bsd-3-clause
| -268,816,623,193,507,870 | -2,120,079,424,575,261,200 | 32.61165 | 92 | 0.568554 | false |
xfire/pydzen
|
pydzen.py
|
1
|
11601
|
#!/usr/bin/env python
#
# Copyright (C) 2008 Rico Schiekel (fire at downgra dot de)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# vim:syntax=python:sw=4:ts=4:expandtab
import sys
import os
import re
import types
import time
import subprocess
import logging
from optparse import OptionParser
class utils(object):
@staticmethod
def screens(screens = 0):
"""
        try to get the number of xinerama screens and return a list of screen numbers.
        first, check if the parameter value is > 0; if so, use it.
        second, check the XINERAMA_SCREENS environment variable, which should contain the
number of screens.
if the environment variable is not set, try xrandr to get the number of
connected displays.
if xrandr fails, return one screen. (-> [0])
"""
logger = logging.getLogger('utils')
if screens <= 0:
logger.debug('try to read environment variable "XINERAMA_SCREENS"')
screens = os.environ.get('XINERAMA_SCREENS')
if isinstance(screens, types.StringTypes):
try:
screens = int(screens)
except ValueError:
logger.error('XINERAMA_SCREENS invalid (%s)' % screens)
screens = 0
if not screens:
try:
logger.debug('try to use xrandr to determine number of connected screens')
screens = utils.execute('xrandr')
screens = len(re.findall(" connected ", screens, re.M))
except OSError:
logger.warning('can not execute xrandr')
screens = 1
logger.debug('found %d screens' % screens)
return range(0, screens)
@staticmethod
def parse_app(args, regex_list, value = None):
"""
parse the output on stdout from an application with one or several
regular expressions.
return an dictionary with all matches.
"""
logging.getLogger('utils').debug('parse_app(%s, %s, %s)' % (args, regex_list, value))
return utils.parse(execute(args, value).split('\n'), regex_list)
@staticmethod
def parse_file(path_list, regex_list):
"""
parse one or several files with one or several regular expressions.
        return a dictionary with all matches.
"""
logger = logging.getLogger('utils')
logger.debug('parse_file(%s, %s)' % (path_list, regex_list))
if not isinstance(path_list, (types.ListType, types.TupleType)):
path_list = [path_list]
lines = []
for path in path_list:
try:
file = open(path, 'r')
lines.extend(file.readlines())
file.close()
except IOError, e:
logger.exception(e)
return utils.parse(lines, regex_list)
@staticmethod
def parse(lines, regex_list):
"""
parse a list of lines with one or several regular expressions.
matching groups must be named with (?P<name>...).
        all matches are returned as a dictionary, where the key is the group
        name and the value is the list of (possibly multiple) matches.
"""
if not isinstance(regex_list, (types.ListType, types.TupleType)):
regex_list = [regex_list]
ret = {}
for line in lines:
for regex in regex_list:
match = regex.match(line)
if match:
for k, v in match.groupdict().iteritems():
ret.setdefault(k, []).append(v)
return ret
@staticmethod
def pipe(app, **kwargs):
"""
        execute an application and return a communication object (returned by
subprocess.Popen(...)).
all parameters in **kwargs will be used as command line parameters for the
application. e.g.
execute('foo', v = True, w = 60, i = '/proc/bar')
-> foo -v -w 60 -i /proc/bar
"""
logger = logging.getLogger('utils')
def _to_param(k, v):
if isinstance(v, types.BooleanType):
return ['-%s' % k]
return ['-%s' % k, '%s' % str(v)]
args = [app]
for k,v in kwargs.iteritems():
if not isinstance(v, types.NoneType):
args.extend(_to_param(k,v))
try:
logger.debug('utils.pipe(%s)' % str(args))
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True)
except OSError, e:
logger.error('can not execute "%s": %s' % (app, e))
sys.exit(1)
return p
@staticmethod
def execute(app, value = None, **kwargs):
"""
        execute an application 'app'. if 'value' is not None, it is sent
via stdin to the application.
all parameters in **kwargs will be used as command line parameters for the
application. e.g.
execute('foo', v = True, w = 60, i = '/proc/bar')
-> foo -v -w 60 -i /proc/bar
all output on stdout generated by the application is returned.
if an error occurs, pydzen will be terminated.
"""
logger = logging.getLogger('utils')
# if not value: value = ''
p = utils.pipe(app, **kwargs)
if value:
out, err = p.communicate(str(value))
else:
out, err = p.communicate()
if err:
logger.error('execute: error: %s' % err)
sys.exit(1)
return out
@staticmethod
def dzen(**kwargs):
"""
        return a communication object (returned by subprocess.Popen(...))
        to a dzen instance.
all parameters from **kwargs overwrite the default parameters in
config.DZEN_OPTIONS.
"""
args = config.DZEN_OPTIONS.copy()
args.update(kwargs)
return utils.pipe(config.DZEN, **args)
@staticmethod
def gdbar(value, **kwargs):
"""
execute gdbar and return the generated string.
all parameters from **kwargs overwrite the default parameters in
config.GDBAR_OPTIONS.
"""
args = config.GDBAR_OPTIONS.copy()
args.update(kwargs)
return utils.execute(config.GDBAR, value, **args)
@staticmethod
def cache(timeout):
"""
        decorator to cache the return value of a function for several
seconds.
"""
def wrapper(f, cache={}):
def nfunc():
key = f
if key not in cache:
cache[key] = [f(), time.time()]
elif (time.time() - cache[key][1]) >= timeout:
cache[key] = [f(), time.time()]
return cache[key][0]
return nfunc
return wrapper
def load_plugins():
"""
try to load plugins from 'config.PLUGIN_DIR'.
each plugin must define an 'update' method, which returns either a string, an array
of strings or None.
"""
logger = logging.getLogger('pydzen')
sys.path.insert(0, os.path.expanduser(config.PLUGIN_DIR))
plugins = []
for p in config.PLUGINS:
try:
plugin = __import__(p, {}, {}, '*')
if hasattr(plugin, 'update'):
logger.debug('load plugin: "%s"' % p)
plugins.append(plugin)
else:
logger.warning('invalid plugin "%s": no update() function specified' % p)
except ImportError, e:
logger.error('error loading plugin "%s": %s' % (p, e))
sys.path = sys.path[1:]
return plugins
def init_logger():
logging.basicConfig(level = config.LOGLEVEL,
format = '%(asctime)s %(name)-8s %(levelname)-6s %(message)s')
def read_config_file(file, **defaults):
"""
try to read the configuration file "file".
this is a normal python file, which defines several variables. these variables are
    then accessible through the ConfigWrapper object as normal member variables.
**defaults are default configuration variables, which might be overwritten.
"""
config = defaults.copy()
try:
execfile(os.path.expanduser(file), {}, config)
except StandardError, e:
print 'Invalid configuration file: %s' % e
sys.exit(1)
class _ConfigWrapper(dict):
def __init__(self, *args, **kwargs):
super(_ConfigWrapper, self).__init__(*args, **kwargs)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
return self[name]
return _ConfigWrapper(config)
def configure():
"""
parse command line parameters, then read config file and return
    a configuration object.
"""
parser = OptionParser()
parser.add_option('-c', '--config', dest = 'CONFIG_FILE',
help = 'specify an alternate pydzenrc file')
parser.add_option('-p', '--plugins', dest = 'PLUGIN_DIR',
help = 'specify an alternate plugin directory')
parser.add_option('-s', '--screens', dest = 'SCREENS', type = 'int',
help = 'number of Xinerama screen')
parser.set_defaults(CONFIG_FILE = '~/.pydzen/pydzenrc',
PLUGIN_DIR = '~/.pydzen',
SCREENS = 0)
(options, args) = parser.parse_args()
config = read_config_file(options.CONFIG_FILE,
PLUGINS = [],
LOGLEVEL = logging.WARN,
SCREENS = options.SCREENS,
PLUGIN_DIR = options.PLUGIN_DIR)
return config
config = configure()
if __name__ == '__main__':
init_logger()
logger = logging.getLogger('pydzen')
plugins = load_plugins()
dzens = [utils.dzen(xs = i + 1) for i in utils.screens(config.SCREENS)]
try:
while True:
lines = []
for p in plugins:
values = p.update()
if values:
if not isinstance(values, (types.ListType, types.TupleType)):
values = [values]
for i, value in enumerate(values):
if len(lines) < (i + 1):
lines.append([])
if value:
lines[i].append(value)
lines = [config.JOINTS.join(line) for line in lines if line]
lines = '\n'.join(lines) + '\n'
for d in dzens:
d.stdin.write(lines)
del lines
time.sleep(1)
except IOError, e:
try:
logger.error(d.stderr.read())
except StandardError, se:
logger.error(se)
except StandardError, e:
logger.error(e)
except KeyboardInterrupt:
pass
|
gpl-2.0
| 2,974,475,515,769,504,300 | 8,780,249,242,667,540,000 | 33.120588 | 125 | 0.560383 | false |
ahoyosid/scikit-learn
|
examples/applications/plot_stock_market.py
|
227
|
8284
|
"""
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The outputs of the 3 models are combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
|
bsd-3-clause
| 8,009,398,535,473,718,000 | -605,500,444,414,428,900 | 31.359375 | 79 | 0.623008 | false |
a13m/ansible
|
lib/ansible/runner/lookup_plugins/items.py
|
166
|
1405
|
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
def flatten(terms):
ret = []
for term in terms:
if isinstance(term, list):
ret.extend(term)
else:
ret.append(term)
return ret
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list) and not isinstance(terms,set):
raise errors.AnsibleError("with_items expects a list or a set")
return flatten(terms)
|
gpl-3.0
| -4,861,479,630,198,056,000 | -2,729,079,453,668,629,500 | 30.931818 | 79 | 0.698221 | false |
vishnugonela/boto
|
boto/pyami/config.py
|
95
|
8016
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import re
import warnings
import boto
from boto.compat import expanduser, ConfigParser, StringIO
# By default we use two locations for the boto configurations,
# /etc/boto.cfg and ~/.boto (which works on Windows and Unix).
BotoConfigPath = '/etc/boto.cfg'
BotoConfigLocations = [BotoConfigPath]
UserConfigPath = os.path.join(expanduser('~'), '.boto')
BotoConfigLocations.append(UserConfigPath)
# If there's a BOTO_CONFIG variable set, we load ONLY
# that variable
if 'BOTO_CONFIG' in os.environ:
BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]
# If there's a BOTO_PATH variable set, we use anything there
# as the current configuration locations, split with os.pathsep.
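# For example (illustrative paths): BOTO_PATH=/etc/boto.cfg:~/.boto-work on a
# Unix-style system (os.pathsep == ':') makes exactly those two files, after
# expanduser(), the configuration locations that are read.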
elif 'BOTO_PATH' in os.environ:
BotoConfigLocations = []
for path in os.environ['BOTO_PATH'].split(os.pathsep):
BotoConfigLocations.append(expanduser(path))
class Config(ConfigParser):
def __init__(self, path=None, fp=None, do_load=True):
# We don't use ``super`` here, because ``ConfigParser`` still uses
# old-style classes.
ConfigParser.__init__(self, {'working_dir': '/mnt/pyami',
'debug': '0'})
if do_load:
if path:
self.load_from_path(path)
elif fp:
self.readfp(fp)
else:
self.read(BotoConfigLocations)
if "AWS_CREDENTIAL_FILE" in os.environ:
full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
try:
self.load_credential_file(full_path)
except IOError:
warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
def load_credential_file(self, path):
"""Load a credential file as is setup like the Java utilities"""
c_data = StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
self.readfp(c_data)
def load_from_path(self, path):
file = open(path)
for line in file.readlines():
match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
if match:
extended_file = match.group(1)
(dir, file) = os.path.split(path)
self.load_from_path(os.path.join(dir, extended_file))
self.read(path)
def save_option(self, path, section, option, value):
"""
Write the specified Section.Option to the config file specified by path.
Replace any previous value. If the path doesn't exist, create it.
        Also add the option to the in-memory config.
"""
config = ConfigParser()
config.read(path)
if not config.has_section(section):
config.add_section(section)
config.set(section, option, value)
fp = open(path, 'w')
config.write(fp)
fp.close()
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
def save_user_option(self, section, option, value):
self.save_option(UserConfigPath, section, option, value)
def save_system_option(self, section, option, value):
self.save_option(BotoConfigPath, section, option, value)
def get_instance(self, name, default=None):
try:
val = self.get('Instance', name)
except:
val = default
return val
def get_user(self, name, default=None):
try:
val = self.get('User', name)
except:
val = default
return val
def getint_user(self, name, default=0):
try:
val = self.getint('User', name)
except:
val = default
return val
def get_value(self, section, name, default=None):
return self.get(section, name, default)
def get(self, section, name, default=None):
try:
val = ConfigParser.get(self, section, name)
except:
val = default
return val
def getint(self, section, name, default=0):
try:
val = ConfigParser.getint(self, section, name)
except:
val = int(default)
return val
def getfloat(self, section, name, default=0.0):
try:
val = ConfigParser.getfloat(self, section, name)
except:
val = float(default)
return val
def getbool(self, section, name, default=False):
if self.has_option(section, name):
val = self.get(section, name)
if val.lower() == 'true':
val = True
else:
val = False
else:
val = default
return val
def setbool(self, section, name, value):
if value:
self.set(section, name, 'true')
else:
self.set(section, name, 'false')
def dump(self):
s = StringIO()
self.write(s)
print(s.getvalue())
def dump_safe(self, fp=None):
if not fp:
fp = StringIO()
for section in self.sections():
fp.write('[%s]\n' % section)
for option in self.options(section):
if option == 'aws_secret_access_key':
fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
else:
fp.write('%s = %s\n' % (option, self.get(section, option)))
def dump_to_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
if not domain:
domain = sdb.create_domain(domain_name)
item = domain.new_item(item_name)
item.active = False
for section in self.sections():
d = {}
for option in self.options(section):
d[option] = self.get(section, option)
item[section] = json.dumps(d)
item.save()
def load_from_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
item = domain.get_item(item_name)
for section in item.keys():
if not self.has_section(section):
self.add_section(section)
d = json.loads(item[section])
for attr_name in d.keys():
attr_value = d[attr_name]
if attr_value is None:
attr_value = 'None'
if isinstance(attr_value, bool):
self.setbool(section, attr_name, attr_value)
else:
self.set(section, attr_name, attr_value)
|
mit
| -1,171,480,325,551,838,200 | -4,462,869,675,742,848,000 | 34.626667 | 126 | 0.589321 | false |
eRestin/MezzGIS
|
mezzanine/forms/tests.py
|
6
|
1157
|
from __future__ import unicode_literals
from django.test import TestCase
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.forms import fields
from mezzanine.forms.models import Form
class TestsForm(TestCase):
def test_forms(self):
"""
Simple 200 status check against rendering and posting to forms
with both optional and required fields.
"""
for required in (True, False):
form = Form.objects.create(title="Form",
status=CONTENT_STATUS_PUBLISHED)
for (i, (field, _)) in enumerate(fields.NAMES):
form.fields.create(label="Field %s" % i, field_type=field,
required=required, visible=True)
response = self.client.get(form.get_absolute_url())
self.assertEqual(response.status_code, 200)
visible_fields = form.fields.visible()
data = dict([("field_%s" % f.id, "test") for f in visible_fields])
response = self.client.post(form.get_absolute_url(), data=data)
self.assertEqual(response.status_code, 200)
|
bsd-2-clause
| 450,604,479,133,714,600 | -347,894,522,246,667,400 | 40.321429 | 78 | 0.611063 | false |
hashems/Mobile-Cloud-Development-Projects
|
appengine/flexible/endpoints/main.py
|
10
|
2861
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Endpoints sample application.
Demonstrates how to create a simple echo API as well as how to deal with
various authentication methods.
"""
import base64
import json
import logging
from flask import Flask, jsonify, request
from flask_cors import cross_origin
from six.moves import http_client
app = Flask(__name__)
def _base64_decode(encoded_str):
# Add paddings manually if necessary.
num_missed_paddings = 4 - len(encoded_str) % 4
if num_missed_paddings != 4:
encoded_str += b'=' * num_missed_paddings
return base64.b64decode(encoded_str).decode('utf-8')
@app.route('/echo', methods=['POST'])
def echo():
"""Simple echo service."""
message = request.get_json().get('message', '')
return jsonify({'message': message})
def auth_info():
"""Retrieves the authenication information from Google Cloud Endpoints."""
encoded_info = request.headers.get('X-Endpoint-API-UserInfo', None)
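    # Cloud Endpoints passes the authenticated caller's claims in this base64-encoded header.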
if encoded_info:
info_json = _base64_decode(encoded_info)
user_info = json.loads(info_json)
else:
user_info = {'id': 'anonymous'}
return jsonify(user_info)
@app.route('/auth/info/googlejwt', methods=['GET'])
def auth_info_google_jwt():
"""Auth info with Google signed JWT."""
return auth_info()
@app.route('/auth/info/googleidtoken', methods=['GET'])
def auth_info_google_id_token():
"""Auth info with Google ID token."""
return auth_info()
@app.route('/auth/info/firebase', methods=['GET'])
@cross_origin(send_wildcard=True)
def auth_info_firebase():
"""Auth info with Firebase auth."""
return auth_info()
@app.errorhandler(http_client.INTERNAL_SERVER_ERROR)
def unexpected_error(e):
"""Handle exceptions by returning swagger-compliant json."""
    logging.exception('An error occurred while processing the request.')
response = jsonify({
'code': http_client.INTERNAL_SERVER_ERROR,
'message': 'Exception: {}'.format(e)})
response.status_code = http_client.INTERNAL_SERVER_ERROR
return response
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
|
apache-2.0
| -3,782,424,439,720,802,000 | 408,182,500,722,794,800 | 29.43617 | 78 | 0.695561 | false |
joerocklin/gem5
|
src/cpu/o3/O3CPU.py
|
4
|
6894
|
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from BaseCPU import BaseCPU
from FUPool import *
from O3Checker import O3Checker
from BranchPredictor import BranchPredictor
class DerivO3CPU(BaseCPU):
type = 'DerivO3CPU'
cxx_header = 'cpu/o3/deriv.hh'
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def require_caches(cls):
return True
@classmethod
def support_take_over(cls):
return True
activity = Param.Unsigned(0, "Initial count")
cachePorts = Param.Unsigned(200, "Cache Ports")
decodeToFetchDelay = Param.Cycles(1, "Decode to fetch delay")
renameToFetchDelay = Param.Cycles(1 ,"Rename to fetch delay")
iewToFetchDelay = Param.Cycles(1, "Issue/Execute/Writeback to fetch "
"delay")
commitToFetchDelay = Param.Cycles(1, "Commit to fetch delay")
fetchWidth = Param.Unsigned(8, "Fetch width")
renameToDecodeDelay = Param.Cycles(1, "Rename to decode delay")
iewToDecodeDelay = Param.Cycles(1, "Issue/Execute/Writeback to decode "
"delay")
commitToDecodeDelay = Param.Cycles(1, "Commit to decode delay")
fetchToDecodeDelay = Param.Cycles(1, "Fetch to decode delay")
decodeWidth = Param.Unsigned(8, "Decode width")
iewToRenameDelay = Param.Cycles(1, "Issue/Execute/Writeback to rename "
"delay")
commitToRenameDelay = Param.Cycles(1, "Commit to rename delay")
decodeToRenameDelay = Param.Cycles(1, "Decode to rename delay")
renameWidth = Param.Unsigned(8, "Rename width")
commitToIEWDelay = Param.Cycles(1, "Commit to "
"Issue/Execute/Writeback delay")
renameToIEWDelay = Param.Cycles(2, "Rename to "
"Issue/Execute/Writeback delay")
issueToExecuteDelay = Param.Cycles(1, "Issue to execute delay (internal "
"to the IEW stage)")
dispatchWidth = Param.Unsigned(8, "Dispatch width")
issueWidth = Param.Unsigned(8, "Issue width")
wbWidth = Param.Unsigned(8, "Writeback width")
wbDepth = Param.Unsigned(1, "Writeback depth")
fuPool = Param.FUPool(DefaultFUPool(), "Functional Unit pool")
iewToCommitDelay = Param.Cycles(1, "Issue/Execute/Writeback to commit "
"delay")
renameToROBDelay = Param.Cycles(1, "Rename to reorder buffer delay")
commitWidth = Param.Unsigned(8, "Commit width")
squashWidth = Param.Unsigned(8, "Squash width")
trapLatency = Param.Cycles(13, "Trap latency")
fetchTrapLatency = Param.Cycles(1, "Fetch trap latency")
backComSize = Param.Unsigned(5, "Time buffer size for backwards communication")
forwardComSize = Param.Unsigned(5, "Time buffer size for forward communication")
LQEntries = Param.Unsigned(32, "Number of load queue entries")
SQEntries = Param.Unsigned(32, "Number of store queue entries")
LSQDepCheckShift = Param.Unsigned(4, "Number of places to shift addr before check")
LSQCheckLoads = Param.Bool(True,
"Should dependency violations be checked for loads & stores or just stores")
store_set_clear_period = Param.Unsigned(250000,
"Number of load/store insts before the dep predictor should be invalidated")
LFSTSize = Param.Unsigned(1024, "Last fetched store table size")
SSITSize = Param.Unsigned(1024, "Store set ID table size")
    numRobs = Param.Unsigned(1, "Number of Reorder Buffers")
numPhysIntRegs = Param.Unsigned(256, "Number of physical integer registers")
numPhysFloatRegs = Param.Unsigned(256, "Number of physical floating point "
"registers")
numIQEntries = Param.Unsigned(64, "Number of instruction queue entries")
numROBEntries = Param.Unsigned(192, "Number of reorder buffer entries")
smtNumFetchingThreads = Param.Unsigned(1, "SMT Number of Fetching Threads")
smtFetchPolicy = Param.String('SingleThread', "SMT Fetch policy")
smtLSQPolicy = Param.String('Partitioned', "SMT LSQ Sharing Policy")
smtLSQThreshold = Param.Int(100, "SMT LSQ Threshold Sharing Parameter")
smtIQPolicy = Param.String('Partitioned', "SMT IQ Sharing Policy")
smtIQThreshold = Param.Int(100, "SMT IQ Threshold Sharing Parameter")
smtROBPolicy = Param.String('Partitioned', "SMT ROB Sharing Policy")
smtROBThreshold = Param.Int(100, "SMT ROB Threshold Sharing Parameter")
smtCommitPolicy = Param.String('RoundRobin', "SMT Commit Policy")
branchPred = BranchPredictor(numThreads = Parent.numThreads)
needsTSO = Param.Bool(buildEnv['TARGET_ISA'] == 'x86',
"Enable TSO Memory model")
def addCheckerCpu(self):
if buildEnv['TARGET_ISA'] in ['arm']:
from ArmTLB import ArmTLB
self.checker = O3Checker(workload=self.workload,
exitOnError=False,
updateOnError=True,
warnOnlyOnLoadError=True)
self.checker.itb = ArmTLB(size = self.itb.size)
self.checker.dtb = ArmTLB(size = self.dtb.size)
self.checker.cpu_id = self.cpu_id
else:
print "ERROR: Checker only supported under ARM ISA!"
exit(1)
|
bsd-3-clause
| 5,453,706,461,443,724,000 | 1,268,108,135,342,308,400 | 46.219178 | 88 | 0.692341 | false |
Distrotech/mozjs
|
js/src/python/mock-1.0.0/tests/testmock.py
|
108
|
42396
|
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from tests.support import (
callable, unittest2, inPy3k, is_instance, next
)
import copy
import pickle
import sys
import mock
from mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, _CallList,
create_autospec
)
try:
unicode
except NameError:
unicode = str
class Iter(object):
def __init__(self):
self.thing = iter(['this', 'is', 'an', 'iter'])
def __iter__(self):
return self
def next(self):
return next(self.thing)
__next__ = next
class Subclass(MagicMock):
pass
class Thing(object):
attribute = 6
foo = 'bar'
class MockTest(unittest2.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertFalse('_items' in mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
self.assertEqual(mock._mock_children, {},
"children not initialised incorrectly")
def test_unicode_not_broken(self):
# This used to raise an exception with Python 2.5 and Mock 0.4
unicode(Mock())
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
@unittest2.skipUnless('java' in sys.platform,
'This test only applies to Jython')
def test_java_exception_side_effect(self):
import java
mock = Mock(side_effect=java.lang.RuntimeException("Boom!"))
# can't use assertRaises with java exceptions
try:
mock(1, 2, fish=3)
except java.lang.RuntimeException:
pass
else:
self.fail('java exception not raised')
mock.assert_called_with(1,2, fish=3)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
        self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegexp(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self):
pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegexp(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegexp(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegexp(AssertionError, 'Not called',
mock.assert_called_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertTrue(isinstance(mock, X))
mock = Mock(spec=X())
self.assertTrue(isinstance(mock, X))
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertTrue(isinstance(mock, X))
mock = Mock(spec_set=X())
self.assertTrue(isinstance(mock, X))
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
@unittest2.skipIf(inPy3k, "no old style classes in Python 3")
def test_spec_old_style_classes(self):
class Foo:
bar = 7
mock = Mock(spec=Foo)
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
mock = Mock(spec=Foo())
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
@unittest2.skipIf(inPy3k, "no old style classes in Python 3")
def test_spec_set_old_style_classes(self):
class Foo:
bar = 7
mock = Mock(spec_set=Foo)
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
def _set():
mock.foo = 3
self.assertRaises(AttributeError, _set)
mock = Mock(spec_set=Foo())
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
def _set():
mock.foo = 3
self.assertRaises(AttributeError, _set)
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
@unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
"__dir__ not available until Python 2.6 or later")
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
@unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
"__dir__ not available until Python 2.6 or later")
def test_dir_from_spec(self):
mock = Mock(spec=unittest2.TestCase)
testcase_attrs = set(dir(unittest2.TestCase))
attrs = set(dir(mock))
# all attributes from the spec are included
self.assertEqual(set(), testcase_attrs - attrs)
# shadow a sys attribute
mock.version = 3
self.assertEqual(dir(mock).count('version'), 1)
@unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
"__dir__ not available until Python 2.6 or later")
def test_filter_dir(self):
patcher = patch.object(mock, 'FILTER_DIR', False)
patcher.start()
try:
attrs = set(dir(Mock()))
type_attrs = set(dir(Mock))
# ALL attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
finally:
patcher.stop()
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
try:
func(*args, **kwargs)
except:
instance = sys.exc_info()[1]
self.assertIsInstance(instance, exception)
else:
self.fail('Exception %r not raised' % (exception,))
msg = str(instance)
self.assertEqual(msg, message)
def test_assert_called_with_failure_message(self):
mock = NonCallableMock()
expected = "mock(1, '2', 3, bar='foo')"
message = 'Expected call: %s\nNot called'
self.assertRaisesWithMsg(
AssertionError, message % (expected,),
mock.assert_called_with, 1, '2', 3, bar='foo'
)
mock.foo(1, '2', 3, foo='foo')
asserters = [
mock.foo.assert_called_with, mock.foo.assert_called_once_with
]
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, '2', 3, bar='foo')"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, '2', 3, bar='foo'
)
# just kwargs
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(bar='foo')"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, bar='foo'
)
# just args
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, 2, 3)"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, 2, 3
)
# empty
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo()"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual), meth
)
def test_mock_calls(self):
mock = MagicMock()
# need to do this because MagicMock.mock_calls used to just return
# a MagicMock which also returned a MagicMock when __eq__ was called
self.assertIs(mock.mock_calls == [], True)
mock = MagicMock()
mock()
expected = [('', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock.foo()
expected.append(call.foo())
self.assertEqual(mock.mock_calls, expected)
# intermediate mock_calls work too
self.assertEqual(mock.foo.mock_calls, [('', (), {})])
mock = MagicMock()
mock().foo(1, 2, 3, a=4, b=5)
expected = [
('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.return_value.foo.mock_calls,
[('', (1, 2, 3), dict(a=4, b=5))])
self.assertEqual(mock.return_value.mock_calls,
[('foo', (1, 2, 3), dict(a=4, b=5))])
mock = MagicMock()
mock().foo.bar().baz()
expected = [
('', (), {}), ('().foo.bar', (), {}),
('().foo.bar().baz', (), {})
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().call_list())
for kwargs in dict(), dict(name='bar'):
mock = MagicMock(**kwargs)
int(mock.foo)
expected = [('foo.__int__', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock = MagicMock(**kwargs)
mock.a()()
expected = [('a', (), {}), ('a()', (), {})]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.a().mock_calls, [call()])
mock = MagicMock(**kwargs)
mock(1)(2)(3)
self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
self.assertEqual(mock().mock_calls, call(2)(3).call_list())
self.assertEqual(mock()().mock_calls, call(3).call_list())
mock = MagicMock(**kwargs)
mock(1)(2)(3).a.b.c(4)
self.assertEqual(mock.mock_calls,
call(1)(2)(3).a.b.c(4).call_list())
self.assertEqual(mock().mock_calls,
call(2)(3).a.b.c(4).call_list())
self.assertEqual(mock()().mock_calls,
call(3).a.b.c(4).call_list())
mock = MagicMock(**kwargs)
int(mock().foo.bar().baz())
last_call = ('().foo.bar().baz().__int__', (), {})
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().__int__().call_list())
self.assertEqual(mock().foo.bar().mock_calls,
call.baz().__int__().call_list())
self.assertEqual(mock().foo.bar().baz.mock_calls,
call().__int__().call_list())
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
def test_arg_lists(self):
mocks = [
Mock(),
MagicMock(),
NonCallableMock(),
NonCallableMagicMock()
]
def assert_attrs(mock):
names = 'call_args_list', 'method_calls', 'mock_calls'
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
for mock in mocks:
assert_attrs(mock)
if callable(mock):
mock()
mock(1, 2)
mock(a=3)
mock.reset_mock()
assert_attrs(mock)
mock.foo()
mock.foo.bar(1, a=3)
mock.foo(1).bar().baz(3)
mock.reset_mock()
assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
args, kwargs = mock.call_args
self.assertEqual(args, (2,))
self.assertEqual(kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
def test_side_effect_iterator(self):
mock = Mock(side_effect=iter([1, 2, 3]))
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
mock = MagicMock(side_effect=['a', 'b', 'c'])
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
mock = Mock(side_effect='ghi')
self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
self.assertRaises(StopIteration, mock)
class Foo(object):
pass
mock = MagicMock(side_effect=Foo)
self.assertIsInstance(mock(), Foo)
mock = Mock(side_effect=Iter())
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
def test_side_effect_setting_iterator(self):
mock = Mock()
mock.side_effect = iter([1, 2, 3])
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
mock.side_effect = ['a', 'b', 'c']
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
this_iter = Iter()
mock.side_effect = this_iter
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
def test_assert_has_calls_any_order(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(b=6)
kalls = [
call(1, 2), ({'a': 3},),
((3, 4),), ((), {'a': 3}),
('', (1, 2)), ('', {'a': 3}),
('', (1, 2), {}), ('', (), {'a': 3})
]
for kall in kalls:
mock.assert_has_calls([kall], any_order=True)
for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
self.assertRaises(
AssertionError, mock.assert_has_calls,
[kall], any_order=True
)
kall_lists = [
[call(1, 2), call(b=6)],
[call(3, 4), call(1, 2)],
[call(b=6), call(b=6)],
]
for kall_list in kall_lists:
mock.assert_has_calls(kall_list, any_order=True)
kall_lists = [
[call(b=6), call(b=6), call(b=6)],
[call(1, 2), call(1, 2)],
[call(3, 4), call(1, 2), call(5, 7)],
[call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
]
for kall_list in kall_lists:
self.assertRaises(
AssertionError, mock.assert_has_calls,
kall_list, any_order=True
)
def test_assert_has_calls(self):
kalls1 = [
call(1, 2), ({'a': 3},),
((3, 4),), call(b=6),
('', (1,), {'b': 6}),
]
kalls2 = [call.foo(), call.bar(1)]
kalls2.extend(call.spam().baz(a=3).call_list())
kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
mocks = []
for mock in Mock(), MagicMock():
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(1, b=6)
mocks.append((mock, kalls1))
mock = Mock()
mock.foo()
mock.bar(1)
mock.spam().baz(a=3)
mock.bam(set(), foo={}).fish([1])
mocks.append((mock, kalls2))
for mock, kalls in mocks:
for i in range(len(kalls)):
for step in 1, 2, 3:
these = kalls[i:i+step]
mock.assert_has_calls(these)
if len(these) > 1:
self.assertRaises(
AssertionError,
mock.assert_has_calls,
list(reversed(these))
)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
def test_mock_calls_create_autospec(self):
def f(a, b):
pass
obj = Iter()
obj.f = f
funcs = [
create_autospec(f),
create_autospec(obj).f
]
for func in funcs:
func(1, 2)
func(3, 4)
self.assertEqual(
func.mock_calls, [call(1, 2), call(3, 4)]
)
def test_mock_add_spec(self):
class _One(object):
one = 1
class _Two(object):
two = 2
class Anything(object):
one = two = three = 'four'
klasses = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
]
for Klass in list(klasses):
klasses.append(lambda K=Klass: K(spec=Anything))
klasses.append(lambda K=Klass: K(spec_set=Anything))
for Klass in klasses:
for kwargs in dict(), dict(spec_set=True):
mock = Klass()
#no error
mock.one, mock.two, mock.three
for One, Two in [(_One, _Two), (['one'], ['two'])]:
for kwargs in dict(), dict(spec_set=True):
mock.mock_add_spec(One, **kwargs)
mock.one
self.assertRaises(
AttributeError, getattr, mock, 'two'
)
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
mock.mock_add_spec(Two, **kwargs)
self.assertRaises(
AttributeError, getattr, mock, 'one'
)
mock.two
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
# note that creating a mock, setting an instance attribute, and
# *then* setting a spec doesn't work. Not the intended use case
def test_mock_add_spec_magic_methods(self):
for Klass in MagicMock, NonCallableMagicMock:
mock = Klass()
int(mock)
mock.mock_add_spec(object)
self.assertRaises(TypeError, int, mock)
mock = Klass()
mock['foo']
            mock.__int__.return_value = 4
mock.mock_add_spec(int)
self.assertEqual(int(mock), 4)
self.assertRaises(TypeError, lambda: mock['foo'])
def test_adding_child_mock(self):
for Klass in NonCallableMock, Mock, MagicMock, NonCallableMagicMock:
mock = Klass()
mock.foo = Mock()
mock.foo()
self.assertEqual(mock.method_calls, [call.foo()])
self.assertEqual(mock.mock_calls, [call.foo()])
mock = Klass()
mock.bar = Mock(name='name')
mock.bar()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
# mock with an existing _new_parent but no name
mock = Klass()
mock.baz = MagicMock()()
mock.baz()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
def test_manager_mock(self):
class Foo(object):
one = 'one'
two = 'two'
manager = Mock()
p1 = patch.object(Foo, 'one')
p2 = patch.object(Foo, 'two')
mock_one = p1.start()
self.addCleanup(p1.stop)
mock_two = p2.start()
self.addCleanup(p2.stop)
manager.attach_mock(mock_one, 'one')
manager.attach_mock(mock_two, 'two')
Foo.two()
Foo.one()
self.assertEqual(manager.mock_calls, [call.two(), call.one()])
def test_magic_methods_mock_calls(self):
for Klass in Mock, MagicMock:
m = Klass()
m.__int__ = Mock(return_value=3)
m.__float__ = MagicMock(return_value=3.0)
int(m)
float(m)
self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
self.assertEqual(m.method_calls, [])
def test_attribute_deletion(self):
# this behaviour isn't *useful*, but at least it's now tested...
for Klass in Mock, MagicMock, NonCallableMagicMock, NonCallableMock:
m = Klass()
original = m.foo
m.foo = 3
del m.foo
self.assertEqual(m.foo, original)
new = m.foo = Mock()
del m.foo
self.assertEqual(m.foo, new)
def test_mock_parents(self):
for Klass in Mock, MagicMock:
m = Klass()
original_repr = repr(m)
m.return_value = m
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m.reset_mock()
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m = Klass()
m.b = m.a
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m.reset_mock()
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m = Klass()
original_repr = repr(m)
m.a = m()
m.a.return_value = m
self.assertEqual(repr(m), original_repr)
self.assertEqual(repr(m.a()), original_repr)
def test_attach_mock(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in classes:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'bar')
self.assertIs(m.bar, m2)
self.assertIn("name='mock.bar'", repr(m2))
m.bar.baz(1)
self.assertEqual(m.mock_calls, [call.bar.baz(1)])
self.assertEqual(m.method_calls, [call.bar.baz(1)])
def test_attach_mock_return_value(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in Mock, MagicMock:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'return_value')
self.assertIs(m(), m2)
self.assertIn("name='mock()'", repr(m2))
m2.foo()
self.assertEqual(m.mock_calls, call().foo().call_list())
def test_attribute_deletion(self):
for mock in Mock(), MagicMock():
self.assertTrue(hasattr(mock, 'm'))
del mock.m
self.assertFalse(hasattr(mock, 'm'))
del mock.f
self.assertFalse(hasattr(mock, 'f'))
self.assertRaises(AttributeError, getattr, mock, 'f')
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
@unittest2.expectedFailure
def test_pickle(self):
for Klass in (MagicMock, Mock, Subclass, NonCallableMagicMock):
mock = Klass(name='foo', attribute=3)
mock.foo(1, 2, 3)
data = pickle.dumps(mock)
new = pickle.loads(data)
new.foo.assert_called_once_with(1, 2, 3)
self.assertFalse(new.called)
self.assertTrue(is_instance(new, Klass))
self.assertIsInstance(new, Thing)
self.assertIn('name="foo"', repr(new))
self.assertEqual(new.attribute, 3)
if __name__ == '__main__':
unittest2.main()
|
mpl-2.0
| 444,869,123,980,241,200 | -2,288,467,903,423,084,500 | 30.381199 | 79 | 0.521889 | false |
aronsky/home-assistant
|
homeassistant/components/switch/vultr.py
|
7
|
3608
|
"""
Support for interacting with Vultr subscriptions.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.vultr/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_NAME
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.components.vultr import (
CONF_SUBSCRIPTION, ATTR_AUTO_BACKUPS, ATTR_ALLOWED_BANDWIDTH,
ATTR_CREATED_AT, ATTR_SUBSCRIPTION_ID, ATTR_SUBSCRIPTION_NAME,
ATTR_IPV4_ADDRESS, ATTR_IPV6_ADDRESS, ATTR_MEMORY, ATTR_DISK,
ATTR_COST_PER_MONTH, ATTR_OS, ATTR_REGION, ATTR_VCPUS, DATA_VULTR)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Vultr {}'
DEPENDENCIES = ['vultr']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SUBSCRIPTION): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
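# Illustrative configuration.yaml entry (a sketch; key names assumed from the
# schema constants above rather than taken from the published documentation):
#
#   switch:
#     - platform: vultr
#       subscription: '123456'
#       name: 'Vultr {}'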
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vultr subscription switch."""
vultr = hass.data[DATA_VULTR]
subscription = config.get(CONF_SUBSCRIPTION)
name = config.get(CONF_NAME)
if subscription not in vultr.data:
_LOGGER.error("Subscription %s not found", subscription)
return False
add_entities([VultrSwitch(vultr, subscription, name)], True)
class VultrSwitch(SwitchDevice):
"""Representation of a Vultr subscription switch."""
def __init__(self, vultr, subscription, name):
"""Initialize a new Vultr switch."""
self._vultr = vultr
self._name = name
self.subscription = subscription
self.data = None
@property
def name(self):
"""Return the name of the switch."""
try:
return self._name.format(self.data['label'])
except (TypeError, KeyError):
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self.data['power_status'] == 'running'
@property
def icon(self):
"""Return the icon of this server."""
return 'mdi:server' if self.is_on else 'mdi:server-off'
@property
def device_state_attributes(self):
"""Return the state attributes of the Vultr subscription."""
return {
ATTR_ALLOWED_BANDWIDTH: self.data.get('allowed_bandwidth_gb'),
ATTR_AUTO_BACKUPS: self.data.get('auto_backups'),
ATTR_COST_PER_MONTH: self.data.get('cost_per_month'),
ATTR_CREATED_AT: self.data.get('date_created'),
ATTR_DISK: self.data.get('disk'),
ATTR_IPV4_ADDRESS: self.data.get('main_ip'),
ATTR_IPV6_ADDRESS: self.data.get('v6_main_ip'),
ATTR_MEMORY: self.data.get('ram'),
ATTR_OS: self.data.get('os'),
ATTR_REGION: self.data.get('location'),
ATTR_SUBSCRIPTION_ID: self.data.get('SUBID'),
ATTR_SUBSCRIPTION_NAME: self.data.get('label'),
ATTR_VCPUS: self.data.get('vcpu_count'),
}
def turn_on(self, **kwargs):
"""Boot-up the subscription."""
if self.data['power_status'] != 'running':
self._vultr.start(self.subscription)
def turn_off(self, **kwargs):
"""Halt the subscription."""
if self.data['power_status'] == 'running':
self._vultr.halt(self.subscription)
def update(self):
"""Get the latest data from the device and update the data."""
self._vultr.update()
self.data = self._vultr.data[self.subscription]
|
apache-2.0
| 7,680,164,090,144,887,000 | 6,777,601,673,283,567,000 | 33.037736 | 75 | 0.640521 | false |
mattcaldwell/boto
|
boto/ec2/autoscale/request.py
|
68
|
1549
|
# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Request(object):
def __init__(self, connection=None):
self.connection = connection
self.request_id = ''
def __repr__(self):
return 'Request:%s' % self.request_id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'RequestId':
self.request_id = value
else:
setattr(self, name, value)
|
mit
| -7,379,827,204,257,645,000 | -5,056,108,627,618,142,000 | 39.763158 | 74 | 0.714009 | false |
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/python/compat.py
|
22
|
11514
|
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
"""
from __future__ import division
import sys, string, socket, struct
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        bad_chars = [x for x in addr if x not in string.hexdigits + ':.']
        if bad_chars:
            raise ValueError("Illegal characters: %r" % (''.join(bad_chars),))
parts = addr.split(':')
elided = parts.count('')
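        # Empty strings in 'parts' mark where '::' elided one or more zero groups.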
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
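        # Track the longest run of zero groups so it can be collapsed to '::' below.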
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
fin = open(filename, "rbU")
try:
source = fin.read()
finally:
fin.close()
code = compile(source, filename, "exec")
exec(code, globals, locals)
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
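# Hypothetical usage sketch (not part of the original module): a class that only
# defines __cmp__ gains the rich comparison methods on Python 3 when decorated:
#
#   @comparable
#   class Version(object):
#       def __init__(self, value):
#           self.value = value
#       def __cmp__(self, other):
#           return cmp(self.value, other.value)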
if _PY3:
unicode = str
else:
unicode = unicode
def nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
if _PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
if _PY3:
from io import StringIO as NativeStringIO
else:
from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
def iterbytes(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
def intToBytes(i):
return ("%d" % i).encode("ascii")
# Ideally we would use memoryview, but it has a number of differences from
# the Python 2 buffer() that make that impractical
    # (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
# PyArg_ParseTuple differences.)
def lazyByteSlice(object, offset=0, size=None):
"""
Return a copy of the given bytes-like object.
If an offset is given, the copy starts at that offset. If a size is
given, the copy will only be of that length.
@param object: C{bytes} to be copied.
@param offset: C{int}, starting index of copy.
@param size: Optional, if an C{int} is given limit the length of copy
to this size.
"""
if size is None:
return object[offset:]
else:
return object[offset:(offset + size)]
def networkString(s):
if not isinstance(s, unicode):
raise TypeError("Can only convert text to bytes on Python 3")
return s.encode('ascii')
else:
def iterbytes(originalBytes):
return originalBytes
def intToBytes(i):
return b"%d" % i
lazyByteSlice = buffer
def networkString(s):
if not isinstance(s, str):
raise TypeError("Can only pass-through bytes on Python 2")
# Ensure we're limited to ASCII subset:
s.decode('ascii')
return s
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arabic numerals.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
try:
StringType = basestring
except NameError:
# Python 3+
StringType = str
try:
from types import InstanceType
except ImportError:
# Python 3+
InstanceType = object
try:
from types import FileType
except ImportError:
from io import IOBase
# Python 3+
FileType = IOBase
__all__ = [
"reraise",
"execfile",
"frozenset",
"reduce",
"set",
"cmp",
"comparable",
"nativeString",
"NativeStringIO",
"networkString",
"unicode",
"iterbytes",
"intToBytes",
"lazyByteSlice",
"StringType",
"InstanceType",
"FileType",
]
|
gpl-3.0
| -6,732,359,341,067,329,000 | -7,207,959,140,571,394,000 | 24.816143 | 78 | 0.593625 | false |
Architektor/PySnip
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py
|
149
|
18680
|
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
select_proxy, to_native_string)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ClosedPoolError
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import NewConnectionError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
from .packages.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The URL of the proxy being used for this request.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7+ versions, use buffering of HTTP
# responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 2.6 versions and back
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
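# --- Editorial usage sketch; not part of the vendored requests module. ---
# It shows the retry pattern the HTTPAdapter docstring describes: pass a
# urllib3 ``Retry`` object instead of an int for fine-grained control.  The
# URL, pool sizes and retry numbers below are illustrative assumptions only.
if __name__ == "__main__":  # pragma: no cover
    import requests
    session = requests.Session()
    # Retry on common transient gateway errors with a small backoff.
    retries = Retry(total=5, backoff_factor=0.2, status_forcelist=[502, 503, 504])
    adapter = HTTPAdapter(pool_connections=20, pool_maxsize=20, max_retries=retries)
    # Mount on both schemes so every request made through the session uses it.
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    # (connect timeout, read timeout) tuple, as documented in send() above.
    response = session.get("https://example.org/", timeout=(3.05, 27))
    print(response.status_code)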
|
gpl-3.0
| 7,338,835,515,558,468,000 | 2,585,667,699,752,359,000 | 37.674948 | 97 | 0.603694 | false |
mtconley/turntable
|
test/lib/python2.7/site-packages/numpy/doc/constants.py
|
172
|
8954
|
"""
=========
Constants
=========
Numpy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
from __future__ import division, absolute_import, print_function
import textwrap, re
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True], dtype=bool)
>>> np.isnan([np.NZERO])
array([False], dtype=bool)
>>> np.isinf([np.NZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True], dtype=bool)
>>> np.isnan([np.PZERO])
array([False], dtype=bool)
>>> np.isinf([np.PZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
.. [1] http://en.wikipedia.org/wiki/Napier_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
if __doc__:
constants_str = []
constants.sort()
for name, doc in constants:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
new_lines.append('')
else:
new_lines.append(line)
s = "\n".join(new_lines)
# Done.
constants_str.append(""".. const:: %s\n %s""" % (name, s))
constants_str = "\n".join(constants_str)
__doc__ = __doc__ % dict(constant_list=constants_str)
del constants_str, name, doc
del line, lines, new_lines, m, s, prev
del constants, add_newdoc
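# --- Editorial note: how a further entry would be documented (illustrative). ---
# Each add_newdoc() call above becomes a ``.. const::`` block that the loop in
# the ``if __doc__`` branch substitutes into the module docstring.  A new
# constant would follow the same pattern, e.g.:
#
#   add_newdoc('numpy', 'euler_gamma',
#       """
#       Euler-Mascheroni constant, approximately ``0.5772156649015329``.
#       """)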
|
mit
| 1,516,561,251,986,657,300 | -3,724,150,879,127,526,400 | 21.783715 | 75 | 0.583985 | false |
gcblue/gcblue
|
bin/Lib/test/test_dictcomps.py
|
93
|
3849
|
import unittest
from test import test_support as support
# For scope testing.
g = "Global variable"
class DictComprehensionTest(unittest.TestCase):
def test_basics(self):
expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
8: 18, 9: 19}
actual = {k: k + 10 for k in range(10)}
self.assertEqual(actual, expected)
expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
actual = {k: v for k in range(10) for v in range(10) if k == v}
self.assertEqual(actual, expected)
def test_scope_isolation(self):
k = "Local Variable"
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {k: None for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(k, "Local Variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
self.assertEqual(k, "Local Variable")
self.assertEqual(actual, expected)
def test_scope_isolation_from_global(self):
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {g: None for g in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(g, "Global variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
self.assertEqual(g, "Global variable")
self.assertEqual(actual, expected)
def test_global_visibility(self):
expected = {0: 'Global variable', 1: 'Global variable',
2: 'Global variable', 3: 'Global variable',
4: 'Global variable', 5: 'Global variable',
6: 'Global variable', 7: 'Global variable',
8: 'Global variable', 9: 'Global variable'}
actual = {k: g for k in range(10)}
self.assertEqual(actual, expected)
def test_local_visibility(self):
v = "Local variable"
expected = {0: 'Local variable', 1: 'Local variable',
2: 'Local variable', 3: 'Local variable',
4: 'Local variable', 5: 'Local variable',
6: 'Local variable', 7: 'Local variable',
8: 'Local variable', 9: 'Local variable'}
actual = {k: v for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(v, "Local variable")
def test_illegal_assignment(self):
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
"exec")
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
"exec")
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
bsd-3-clause
| -1,677,876,950,545,182,700 | -2,569,880,701,675,311,000 | 41.296703 | 79 | 0.489738 | false |
sander76/home-assistant
|
homeassistant/components/sisyphus/media_player.py
|
9
|
6557
|
"""Support for track controls on the Sisyphus Kinetic Art Table."""
import aiohttp
from sisyphus_control import Track
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SHUFFLE_SET,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.exceptions import PlatformNotReady
from . import DATA_SISYPHUS
MEDIA_TYPE_TRACK = "sisyphus_track"
SUPPORTED_FEATURES = (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_PAUSE
| SUPPORT_SHUFFLE_SET
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY
)
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a media player entity for a Sisyphus table."""
host = discovery_info[CONF_HOST]
try:
table_holder = hass.data[DATA_SISYPHUS][host]
table = await table_holder.get_table()
except aiohttp.ClientError as err:
raise PlatformNotReady() from err
add_entities([SisyphusPlayer(table_holder.name, host, table)], True)
class SisyphusPlayer(MediaPlayerEntity):
"""Representation of a Sisyphus table as a media player device."""
def __init__(self, name, host, table):
"""Initialize the Sisyphus media device."""
self._name = name
self._host = host
self._table = table
async def async_added_to_hass(self):
"""Add listeners after this object has been initialized."""
self._table.add_listener(self.async_write_ha_state)
async def async_update(self):
"""Force update table state."""
await self._table.refresh()
@property
def unique_id(self):
"""Return the UUID of the table."""
return self._table.id
@property
def available(self):
"""Return true if the table is responding to heartbeats."""
return self._table.is_connected
@property
def name(self):
"""Return the name of the table."""
return self._name
@property
def state(self):
"""Return the current state of the table; sleeping maps to off."""
if self._table.state in ["homing", "playing"]:
return STATE_PLAYING
if self._table.state == "paused":
if self._table.is_sleeping:
return STATE_OFF
return STATE_PAUSED
if self._table.state == "waiting":
return STATE_IDLE
return None
@property
def volume_level(self):
"""Return the current playback speed (0..1)."""
return self._table.speed
@property
def shuffle(self):
"""Return True if the current playlist is in shuffle mode."""
return self._table.is_shuffle
async def async_set_shuffle(self, shuffle):
"""Change the shuffle mode of the current playlist."""
await self._table.set_shuffle(shuffle)
@property
def media_playlist(self):
"""Return the name of the current playlist."""
return self._table.active_playlist.name if self._table.active_playlist else None
@property
def media_title(self):
"""Return the title of the current track."""
return self._table.active_track.name if self._table.active_track else None
@property
def media_content_type(self):
"""Return the content type currently playing; i.e. a Sisyphus track."""
return MEDIA_TYPE_TRACK
@property
def media_content_id(self):
"""Return the track ID of the current track."""
return self._table.active_track.id if self._table.active_track else None
@property
def media_duration(self):
"""Return the total time it will take to run this track at the current speed."""
return self._table.active_track_total_time.total_seconds()
@property
def media_position(self):
"""Return the current position within the track."""
return (
self._table.active_track_total_time
- self._table.active_track_remaining_time
).total_seconds()
@property
def media_position_updated_at(self):
"""Return the last time we got a position update."""
return self._table.active_track_remaining_time_as_of
@property
def supported_features(self):
"""Return the features supported by this table."""
return SUPPORTED_FEATURES
@property
def media_image_url(self):
"""Return the URL for a thumbnail image of the current track."""
if self._table.active_track:
return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)
        return super().media_image_url
async def async_turn_on(self):
"""Wake up a sleeping table."""
await self._table.wakeup()
async def async_turn_off(self):
"""Put the table to sleep."""
await self._table.sleep()
async def async_volume_down(self):
"""Slow down playback."""
await self._table.set_speed(max(0, self._table.speed - 0.1))
async def async_volume_up(self):
"""Speed up playback."""
await self._table.set_speed(min(1.0, self._table.speed + 0.1))
async def async_set_volume_level(self, volume):
"""Set playback speed (0..1)."""
await self._table.set_speed(volume)
async def async_media_play(self):
"""Start playing."""
await self._table.play()
async def async_media_pause(self):
"""Pause."""
await self._table.pause()
async def async_media_next_track(self):
"""Skip to next track."""
cur_track_index = self._get_current_track_index()
await self._table.active_playlist.play(
self._table.active_playlist.tracks[cur_track_index + 1]
)
async def async_media_previous_track(self):
"""Skip to previous track."""
cur_track_index = self._get_current_track_index()
await self._table.active_playlist.play(
self._table.active_playlist.tracks[cur_track_index - 1]
)
def _get_current_track_index(self):
for index, track in enumerate(self._table.active_playlist.tracks):
if track.id == self._table.active_track.id:
return index
return -1
|
apache-2.0
| -6,031,123,263,719,636,000 | -1,539,271,303,886,951,000 | 29.21659 | 88 | 0.630166 | false |
kamyu104/django
|
tests/signals/tests.py
|
311
|
10273
|
from __future__ import unicode_literals
from django.db import models
from django.db.models import signals
from django.dispatch import receiver
from django.test import TestCase
from django.utils import six
from .models import Author, Book, Car, Person
class BaseSignalTest(TestCase):
def setUp(self):
# Save up the number of connected signals so that we can check at the
# end that all the signals we register get properly unregistered (#9989)
self.pre_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
def tearDown(self):
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
self.assertEqual(self.pre_signals, post_signals)
class SignalTests(BaseSignalTest):
def test_model_pre_init_and_post_init(self):
data = []
def pre_init_callback(sender, args, **kwargs):
data.append(kwargs['kwargs'])
signals.pre_init.connect(pre_init_callback)
def post_init_callback(sender, instance, **kwargs):
data.append(instance)
signals.post_init.connect(post_init_callback)
p1 = Person(first_name="John", last_name="Doe")
self.assertEqual(data, [{}, p1])
def test_save_signals(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("raw", False))
)
def post_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("created"), kwargs.get("raw", False))
)
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
self.assertEqual(data, [
(p1, False),
(p1, True, False),
])
data[:] = []
p1.first_name = "Tom"
p1.save()
self.assertEqual(data, [
(p1, False),
(p1, False, False),
])
data[:] = []
# Calling an internal method purely so that we can trigger a "raw" save.
p1.save_base(raw=True)
self.assertEqual(data, [
(p1, True),
(p1, False, True),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
data[:] = []
p2.id = 99998
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
def test_delete_signals(self):
data = []
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append(
(instance, instance.id is None)
)
# #8285: signals can be any callable
class PostDeleteHandler(object):
def __init__(self, data):
self.data = data
def __call__(self, signal, sender, instance, **kwargs):
self.data.append(
(instance, instance.id is None)
)
post_delete_handler = PostDeleteHandler(data)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
p1.delete()
self.assertEqual(data, [
(p1, False),
(p1, False),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
p2.id = 99998
p2.save()
p2.delete()
self.assertEqual(data, [
(p2, False),
(p2, False)
])
data[:] = []
self.assertQuerysetEqual(
Person.objects.all(), [
"James Jones",
],
six.text_type
)
finally:
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_decorators(self):
data = []
@receiver(signals.pre_save, weak=False)
def decorated_handler(signal, sender, instance, **kwargs):
data.append(instance)
@receiver(signals.pre_save, sender=Car, weak=False)
def decorated_handler_with_sender_arg(signal, sender, instance, **kwargs):
data.append(instance)
try:
c1 = Car.objects.create(make="Volkswagon", model="Passat")
self.assertEqual(data, [c1, c1])
finally:
signals.pre_save.disconnect(decorated_handler)
signals.pre_save.disconnect(decorated_handler_with_sender_arg, sender=Car)
def test_save_and_delete_signals_with_m2m(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append('pre_save signal, %s' % instance)
if kwargs.get('raw'):
data.append('Is raw')
def post_save_handler(signal, sender, instance, **kwargs):
data.append('post_save signal, %s' % instance)
if 'created' in kwargs:
if kwargs['created']:
data.append('Is created')
else:
data.append('Is updated')
if kwargs.get('raw'):
data.append('Is raw')
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append('pre_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
def post_delete_handler(signal, sender, instance, **kwargs):
data.append('post_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
a1 = Author.objects.create(name='Neal Stephenson')
self.assertEqual(data, [
"pre_save signal, Neal Stephenson",
"post_save signal, Neal Stephenson",
"Is created"
])
data[:] = []
b1 = Book.objects.create(name='Snow Crash')
self.assertEqual(data, [
"pre_save signal, Snow Crash",
"post_save signal, Snow Crash",
"Is created"
])
data[:] = []
# Assigning and removing to/from m2m shouldn't generate an m2m signal.
b1.authors = [a1]
self.assertEqual(data, [])
b1.authors = []
self.assertEqual(data, [])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_disconnect_in_dispatch(self):
"""
Test that signals that disconnect when being called don't mess future
dispatching.
"""
class Handler(object):
def __init__(self, param):
self.param = param
self._run = False
def __call__(self, signal, sender, **kwargs):
self._run = True
signal.disconnect(receiver=self, sender=sender)
a, b = Handler(1), Handler(2)
signals.post_save.connect(a, sender=Person, weak=False)
signals.post_save.connect(b, sender=Person, weak=False)
Person.objects.create(first_name='John', last_name='Smith')
self.assertTrue(a._run)
self.assertTrue(b._run)
self.assertEqual(signals.post_save.receivers, [])
class LazyModelRefTest(BaseSignalTest):
def setUp(self):
super(LazyModelRefTest, self).setUp()
self.received = []
def receiver(self, **kwargs):
self.received.append(kwargs)
def test_invalid_sender_model_name(self):
with self.assertRaisesMessage(ValueError,
"Specified sender must either be a model or a "
"model name of the 'app_label.ModelName' form."):
signals.post_init.connect(self.receiver, sender='invalid')
def test_already_loaded_model(self):
signals.post_init.connect(
self.receiver, sender='signals.Book', weak=False
)
try:
instance = Book()
self.assertEqual(self.received, [{
'signal': signals.post_init,
'sender': Book,
'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Book)
def test_not_loaded_model(self):
signals.post_init.connect(
self.receiver, sender='signals.Created', weak=False
)
try:
class Created(models.Model):
pass
instance = Created()
self.assertEqual(self.received, [{
'signal': signals.post_init, 'sender': Created, 'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Created)
|
bsd-3-clause
| -5,818,668,618,582,257,000 | 2,673,929,689,660,413,400 | 33.016556 | 86 | 0.53957 | false |
droidapps/pdfreader4Android
|
deps/freetype-2.4.10/src/tools/docmaker/docmaker.py
|
463
|
2766
|
#!/usr/bin/env python
#
# DocMaker (c) 2002, 2004, 2008 David Turner <[email protected]>
#
# This program is a re-write of the original DocMaker tool used
# to generate the API Reference of the FreeType font engine
# by converting in-source comments into structured HTML.
#
# This new version is capable of outputting XML data, as well
# as accepting more liberal formatting options.
#
# It also uses regular expression matching and substitution
# to speed things significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
print "\nDocMaker Usage information\n"
print " docmaker [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -t : set project title, as in '-t \"My Project\"'"
print " -o : set output directory, as in '-o mydir'"
print " -p : set documentation prefix, as in '-p ft2'"
print ""
print " --title : same as -t, as in '--title=\"My Project\"'"
print " --output : same as -o, as in '--output=mydir'"
print " --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"ht:o:p:", \
["help", "title=", "output=", "prefix="] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
project_title = "Project"
project_prefix = None
output_dir = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-t", "--title" ):
project_title = opt[1]
if opt[0] in ( "-o", "--output" ):
utils.output_dir = opt[1]
if opt[0] in ( "-p", "--prefix" ):
project_prefix = opt[1]
check_output()
# create context and processor
source_processor = SourceProcessor()
content_processor = ContentProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = HtmlFormatter( content_processor, project_title, project_prefix )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
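# --- Editorial usage sketch. ---
# DocMaker is driven from the command line (note that main() reads sys.argv
# directly, so its argv parameter is effectively ignored).  An assumed
# invocation for a FreeType-style source tree -- the paths are illustrative:
#
#   python docmaker.py --title="My Project" --prefix=ft2 \
#          --output=./reference include/freetype/*.h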
|
gpl-3.0
| -1,537,709,813,166,716,400 | -6,775,520,168,446,771,000 | 25.09434 | 81 | 0.587852 | false |
jruiperezv/ANALYSE
|
lms/djangoapps/psychometrics/management/commands/init_psychometrics.py
|
131
|
2392
|
#!/usr/bin/python
#
# generate psychometrics data from tracking logs and student module data
import json
from courseware.models import StudentModule
from track.models import TrackingLog
from psychometrics.models import PsychometricData
from django.conf import settings
from django.core.management.base import BaseCommand
#db = "ocwtutor" # for debugging
#db = "default"
db = getattr(settings, 'DATABASE_FOR_PSYCHOMETRICS', 'default')
class Command(BaseCommand):
help = "initialize PsychometricData tables from StudentModule instances (and tracking data, if in SQL)."
help += "Note this is done for all courses for which StudentModule instances exist."
def handle(self, *args, **options):
# delete all pmd
#PsychometricData.objects.all().delete()
#PsychometricData.objects.using(db).all().delete()
smset = StudentModule.objects.using(db).exclude(max_grade=None)
for sm in smset:
usage_key = sm.module_state_key
if not usage_key.block_type == "problem":
continue
try:
state = json.loads(sm.state)
done = state['done']
except:
print "Oops, failed to eval state for %s (state=%s)" % (sm, sm.state)
continue
if done: # only keep if problem completed
try:
pmd = PsychometricData.objects.using(db).get(studentmodule=sm)
except PsychometricData.DoesNotExist:
pmd = PsychometricData(studentmodule=sm)
pmd.done = done
pmd.attempts = state['attempts']
# get attempt times from tracking log
uname = sm.student.username
tset = TrackingLog.objects.using(db).filter(username=uname, event_type__contains='problem_check')
tset = tset.filter(event_source='server')
tset = tset.filter(event__contains="'%s'" % usage_key)
checktimes = [x.dtcreated for x in tset]
pmd.checktimes = checktimes
if not len(checktimes) == pmd.attempts:
print "Oops, mismatch in number of attempts and check times for %s" % pmd
#print pmd
pmd.save(using=db)
print "%d PMD entries" % PsychometricData.objects.using(db).all().count()
|
agpl-3.0
| -404,559,332,141,671,740 | -8,877,818,335,327,283 | 35.242424 | 113 | 0.605769 | false |
DESatAPSU/DAWDs
|
python/origBandpass_FITSToCSV.py
|
1
|
1930
|
# Converts STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits to
# y3a2_std_passband_extend3000_ugrizYatm.csv
#
# To run (bash):
# python origBandpass_FITSToCSV.py > origBandpass_FITSToCSV.log 2>&1 &
#
# To run (tcsh):
# python origBandpass_FITSToCSV.py >& origBandpass_FITSToCSV.log &
#
# DLT, 2017-06-30
# based in part on scripts by Jack Mueller and Jacob Robertson.
# Initial setup...
import numpy as np
import pandas as pd
import os
import string
import shutil
import pyfits
# Be sure to edit these next two lines appropriately...
bandsDir = '/Users/dtucker/IRAF/DECam/StdBands_Y3A2_extend3000'
inputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits'
# List of filter bands (plus atm)...
bandList = ['g', 'r', 'i', 'z', 'Y', 'atm']
# Read in inputFile to create a reformatted version in CSV format...
hdulist = pyfits.open(inputFile)
tbdata = hdulist[1].data
# Create lists from each column...
lambdaList = tbdata['LAMBDA'].tolist()
gList = tbdata['g'].tolist()
rList = tbdata['r'].tolist()
iList = tbdata['i'].tolist()
zList = tbdata['z'].tolist()
YList = tbdata['Y'].tolist()
atmList = tbdata['atm'].tolist()
# Create pandas dataframe from the lists...
df = pd.DataFrame(np.column_stack([lambdaList,gList,rList,iList,zList,YList,atmList]),
columns=['lambda','g','r','i','z','Y','atm'])
# Output the full table as a CSV file
outputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.csv'
if os.path.isfile(outputFile):
shutil.move(outputFile, outputFile+'~')
df.to_csv(outputFile,index=False)
# Output individual bands (+atm)...
for band in bandList:
outputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.'+band+'.csv'
if os.path.isfile(outputFile):
shutil.move(outputFile, outputFile+'~')
columnNames = ['lambda',band]
df.to_csv(outputFile,index=False,columns=columnNames,header=False)
# Finis!
exit()
|
mit
| 2,370,203,886,282,699,300 | 2,942,387,443,158,313,500 | 29.15625 | 89 | 0.702073 | false |
Schwittleymani/ECO
|
src/python/keras_lstm/lstm_wrapper.py
|
2
|
4929
|
from keras.models import Sequential
from keras.models import load_model, model_from_json
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
import numpy as np
import pickle
import os
class LSTMWrapper(object):
def __init__(self, maxlen, step):
self.maxlen = maxlen
self.step = step
self.name = None
def load(self, path):
text = open(path).read().lower()
print('corpus length:', len(text))
self.chars = sorted(list(set(text)))
print('total chars:', len(self.chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
# cut the text in semi-redundant sequences of maxlen characters
sentences = []
next_chars = []
for i in range(0, len(text) - self.maxlen, self.step):
sentences.append(text[i: i + self.maxlen])
next_chars.append(text[i + self.maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
self.X = np.zeros((len(sentences), self.maxlen, len(self.chars)), dtype=np.bool)
self.y = np.zeros((len(sentences), len(self.chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
self.X[i, t, self.char_indices[char]] = 1
self.y[i, self.char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
self.model = Sequential()
self.model.add(LSTM(128, input_shape=(self.maxlen, len(self.chars))))
self.model.add(Dense(len(self.chars)))
self.model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def train(self, iterations, epochs, model_save_path=None, save_every=0):
# train the model
for iteration in xrange(iterations):
print('Iteration ' + str(iteration) + ' / ' + str(iterations))
self.model.fit(self.X, self.y, batch_size=128, nb_epoch=epochs)
if save_every != 0 and iteration % save_every == 0:
output_path = model_save_path + '_iteration' + str(iteration)
self.save_model(output_path)
def sample(self, diversity, seed, output_length):
output_text = seed.rjust(self.maxlen)
input_text = seed.rjust(self.maxlen)
for i in range(output_length):
x = np.zeros((1, self.maxlen, len(self.chars)))
for t, char in enumerate(input_text):
x[0, t, self.char_indices[char]] = 1.
preds = self.model.predict(x, verbose=0)[0]
next_index = self.__sample_character(preds, diversity)
next_char = self.indices_char[next_index]
input_text = input_text[1:] + next_char
output_text += next_char
return output_text
def save_model(self, path):
directory, filename = os.path.split(path)
if not os.path.exists(directory):
os.makedirs(directory)
print('Saving model to' + path)
model_json = self.model.to_json()
with open(path + '.json', 'w') as json_file:
json_file.write(model_json)
self.model.save_weights(path)
with open(path + '_chars.pkl', 'wb') as file:
pickle.dump(self.chars, file, pickle.HIGHEST_PROTOCOL)
with open(path + '_char_indices.pkl', 'wb') as file:
pickle.dump(self.char_indices, file, pickle.HIGHEST_PROTOCOL)
with open(path + '_indices_char.pkl', 'wb') as file:
pickle.dump(self.indices_char, file, pickle.HIGHEST_PROTOCOL)
def load_model(self, path):
print('Loading model from ' + path)
json_file = open(path + '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
self.model = model_from_json(loaded_model_json)
self.model.load_weights(path)
optimizer = RMSprop(lr=0.01)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
with open(path + '_chars.pkl', 'rb') as file:
self.chars = pickle.load(file)
with open(path + '_char_indices.pkl', 'rb') as file:
self.char_indices = pickle.load(file)
with open(path + '_indices_char.pkl', 'rb') as file:
self.indices_char = pickle.load(file)
def __sample_character(self, preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
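# --- Editorial usage sketch; not part of the original module. ---
# A minimal end-to-end run of LSTMWrapper.  The corpus path, iteration counts
# and seed text below are illustrative assumptions; the seed must only use
# characters that occur in the corpus (it is space-padded to ``maxlen``).
if __name__ == "__main__":
    wrapper = LSTMWrapper(maxlen=40, step=3)
    wrapper.load("corpus.txt")          # vectorise the text and build the network
    wrapper.train(iterations=2, epochs=1,
                  model_save_path="models/lstm", save_every=1)
    print(wrapper.sample(diversity=0.5, seed="the ", output_length=200))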
|
apache-2.0
| 5,148,189,783,047,430,000 | -422,582,246,305,907,140 | 37.507813 | 88 | 0.600325 | false |
wilvk/ansible
|
lib/ansible/modules/windows/win_lineinfile.py
|
46
|
7167
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_lineinfile
author: "Brian Lloyd <[email protected]>"
short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in a file only.
version_added: "2.0"
options:
path:
required: true
aliases: [ dest, destfile, name ]
description:
- The path of the file to modify.
- Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
regexp:
required: false
description:
- >
The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found
will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions;
see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx).
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
description:
- Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available;
C(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
validate:
required: false
description:
- Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like expansion and pipes won't work.
default: None
encoding:
required: false
default: "auto"
description:
- Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause
the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
- >
An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method - see
U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx).
- This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a
specific encoding, the default encoding (UTF-8, no BOM) will be used.
newline:
required: false
description:
- >
Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated
line separator will be used for file output regardless of the original line separator that appears in the input file.
choices: [ "windows", "unix" ]
default: "windows"
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
'''
EXAMPLES = r'''
# Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- win_lineinfile:
path: C:\temp\example.conf
regexp: '^name='
line: 'name=JohnDoe'
- win_lineinfile:
path: C:\temp\example.conf
regexp: '^name='
state: absent
- win_lineinfile:
path: C:\temp\example.conf
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
- win_lineinfile:
path: C:\temp\httpd.conf
regexp: '^Listen '
insertafter: '^#Listen '
line: Listen 8080
- win_lineinfile:
path: C:\temp\services
regexp: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
# Create file if it doesn't exist with a specific encoding
- win_lineinfile:
path: C:\temp\utf16.txt
create: yes
encoding: utf-16
line: This is a utf-16 encoded file
# Add a line to a file and ensure the resulting file uses unix line separators
- win_lineinfile:
path: C:\temp\testfile.txt
line: Line added to file
newline: unix
# Update a line using backrefs
- win_lineinfile:
path: C:\temp\example.conf
backrefs: yes
regexp: '(^name=)'
line: '$1JohnDoe'
'''
|
gpl-3.0
| 6,150,139,871,122,209,000 | -4,202,783,500,728,564,000 | 40.668605 | 154 | 0.690247 | false |
Akasurde/pytest
|
testing/test_doctest.py
|
10
|
12556
|
from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
import py
class TestDoctests:
def test_collect_testtextfile(self, testdir):
w = testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
""")
for x in (testdir.tmpdir, checkfile):
#print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestTextfile)
items, reprec = testdir.inline_genitems(w)
assert len(items) == 1
def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, testdir):
path = testdir.makepyfile(whatever='""">>> pass"""')
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_collect_module_two_doctest_no_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'# Empty'
def my_func():
">>> magic = 42 "
def unuseful():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(test_doc="""
>>> x = 1
>>> x == 1
False
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(failed=1)
def test_new_pattern(self, testdir):
p = testdir.maketxtfile(xdoc ="""
>>> x = 1
>>> x == 1
False
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_doctest_unexpected_exception(self, testdir):
testdir.maketxtfile("""
>>> i = 0
>>> 0 / i
2
""")
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*unexpected_exception*",
"*>>> i = 0*",
"*>>> 0 / i*",
"*UNEXPECTED*ZeroDivision*",
])
def test_doctest_linedata_missing(self, testdir):
testdir.tmpdir.join('hello.py').write(py.code.Source("""
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""))
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*hello*",
"*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
"*1/0*",
"*UNEXPECTED*ZeroDivision*",
"*1 failed*",
])
def test_doctest_unex_importerror(self, testdir):
testdir.tmpdir.join("hello.py").write(py.code.Source("""
import asdalsdkjaslkdjasd
"""))
testdir.maketxtfile("""
>>> import hello
>>>
""")
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*>>> import hello",
"*UNEXPECTED*ImportError*",
"*import asdals*",
])
def test_doctestmodule(self, testdir):
p = testdir.makepyfile("""
'''
>>> x = 1
>>> x == 1
False
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, testdir):
p = testdir.mkpydir("hello")
p.join("__init__.py").write(py.code.Source("""
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""))
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines([
'004 *>>> i = 0',
'005 *>>> i + 1',
'*Expected:',
"* 2",
"*Got:",
"* 1",
"*:5: DocTestFailure"
])
def test_txtfile_failing(self, testdir):
p = testdir.maketxtfile("""
>>> i = 0
>>> i + 1
2
""")
result = testdir.runpytest(p, "-s")
result.stdout.fnmatch_lines([
'001 >>> i = 0',
'002 >>> i + 1',
'Expected:',
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure"
])
def test_txtfile_with_fixtures(self, testdir):
p = testdir.maketxtfile("""
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, testdir):
testdir.makeini("""
[pytest]
usefixtures = myfixture
""")
testdir.makeconftest("""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
""")
p = testdir.maketxtfile("""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, testdir):
p = testdir.makepyfile("""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, testdir):
p = testdir.makepyfile("""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def unuseful():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, testdir):
p = testdir.makepyfile("""
class MyClass:
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
""")
p = testdir.makepyfile("""
class MyClass:
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS
""")
p = testdir.makepyfile("""
class MyClass:
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
""")
p = testdir.maketxtfile(xdoc="""
>>> a = "foo "
>>> print(a)
foo
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS
""")
p = testdir.maketxtfile(xdoc="""
>>> a = "foo "
>>> print(a)
foo
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_ignore_import_errors_on_doctest(self, testdir):
p = testdir.makepyfile("""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
""")
reprec = testdir.inline_run(p, "--doctest-modules",
"--doctest-ignore-import-errors")
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, testdir):
"""
#713: Fix --junit-xml option when used with --doctest-modules.
"""
p = testdir.makepyfile("""
def foo():
'''
>>> 1 + 1
3
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules",
"--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_doctest_module_session_fixture(self, testdir):
"""Test that session fixtures are initialized for doctest modules (#768)
"""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
testdir.makeconftest("""
import pytest
import sys
@pytest.yield_fixture(autouse=True, scope='session')
def myfixture():
assert not hasattr(sys, 'pytest_session_data')
sys.pytest_session_data = 1
yield
del sys.pytest_session_data
""")
testdir.makepyfile(foo="""
import sys
def foo():
'''
>>> assert sys.pytest_session_data == 1
'''
def bar():
'''
>>> assert sys.pytest_session_data == 1
'''
""")
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines('*2 passed*')
|
mit
| 5,594,005,945,232,678,000 | 5,239,653,967,752,777,000 | 30.156328 | 80 | 0.455957 | false |
rubgombar1/sharing-cars
|
sharingcars/common/migrations/0001_initial.py
|
1
|
4372
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-05-02 14:13
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import sharingcars.helpers.User
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='Actor',
fields=[
('user_account', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=256)),
('comment', models.TextField()),
('rating', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)])),
],
),
migrations.CreateModel(
name='Folder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=256)),
('body', models.TextField(max_length=256)),
('creationMoment', models.DateTimeField(auto_now_add=True)),
('folder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Folder')),
],
),
migrations.CreateModel(
name='Administrator',
fields=[
('actor_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='common.Actor')),
],
bases=('common.actor',),
),
migrations.CreateModel(
name='User',
fields=[
('actor_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='common.Actor')),
('name', models.CharField(max_length=256)),
('surnames', models.CharField(max_length=256)),
('city', models.CharField(max_length=256)),
('birthdate', models.DateField()),
('phone', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator('^\\+?\\d+$')])),
('searchinCar', models.BooleanField(default=False)),
('photo', models.ImageField(default='default', null=True, upload_to=sharingcars.helpers.User.path_generator)),
],
bases=('common.actor',),
),
migrations.AddField(
model_name='message',
name='recipient',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipient', to='common.Actor'),
),
migrations.AddField(
model_name='message',
name='sender',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to='common.Actor'),
),
migrations.AddField(
model_name='folder',
name='actor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Actor'),
),
migrations.AddField(
model_name='comment',
name='evaluated',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='evaluated', to='common.User'),
),
migrations.AddField(
model_name='comment',
name='referrer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='referrer', to='common.User'),
),
]
|
gpl-3.0
| 2,001,951,944,377,846,800 | -5,023,826,774,632,661,000 | 43.161616 | 188 | 0.583715 | false |
netzary/Kaline
|
ldapdb/models/__init__.py
|
2
|
1719
|
# -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2011, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ldapdb.models.base import Model
|
mit
| -2,397,637,540,290,842,000 | 8,152,462,042,020,074,000 | 48.057143 | 82 | 0.761211 | false |
bbaumer/ansible-modules-core
|
system/cron.py
|
39
|
17136
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2012, Dane Summers <[email protected]>
# (c) 2013, Mike Grozak <[email protected]>
# (c) 2013, Patrick Callahan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Cron Plugin: The goal of this plugin is to provide an idempotent method for
# setting up cron jobs on a host. The script will play well with other manually
# entered crons. Each cron job entered will be preceded by a comment
# describing the job so that it can be found later, which is required to be
# present in order for this plugin to find/modify the job.
#
# This module is based on python-crontab by Martin Owens.
#
DOCUMENTATION = """
---
module: cron
short_description: Manage cron.d and crontab entries.
description:
  - Use this module to manage crontab entries. It allows you to create, update,
    or delete named crontab entries.
- 'The module includes one line with the description of the crontab entry C("#Ansible: <name>")
corresponding to the "name" passed to the module, which is used by future ansible/module calls
to find/check the state. The "name" parameter should be unique, and changing the "name" value
will result in a new cron task being created (or a different one being removed)'
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry.
default: null
required: false
user:
description:
- The specific user whose crontab should be modified.
required: false
default: root
job:
description:
- The command to execute. Required if state=present.
required: false
default: null
state:
description:
- Whether to ensure the job is present or absent.
required: false
default: present
choices: [ "present", "absent" ]
cron_file:
description:
- If specified, uses this file in cron.d instead of an individual user's crontab.
required: false
default: null
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
required: false
choices: [ "yes", "no" ]
default: no
minute:
description:
- Minute when the job should run ( 0-59, *, */2, etc )
required: false
default: "*"
hour:
description:
- Hour when the job should run ( 0-23, *, */2, etc )
required: false
default: "*"
day:
description:
- Day of the month the job should run ( 1-31, *, */2, etc )
required: false
default: "*"
aliases: [ "dom" ]
month:
description:
- Month of the year the job should run ( 1-12, *, */2, etc )
required: false
default: "*"
weekday:
description:
- Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
required: false
default: "*"
aliases: [ "dow" ]
reboot:
description:
      - Whether the job should be run at reboot. This option is deprecated; users should use special_time instead.
version_added: "1.0"
required: false
default: "no"
choices: [ "yes", "no" ]
special_time:
description:
- Special time specification nickname.
version_added: "1.3"
required: false
default: null
choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ]
requirements:
- cron
author: "Dane Summers (@dsummersl)"
updates: [ 'Mike Grozak', 'Patrick Callahan' ]
"""
EXAMPLES = '''
# Ensure a job that runs at 2 and 5 exists.
# Creates an entry like "0 5,2 * * ls -alh > /dev/null"
- cron: name="check dirs" minute="0" hour="5,2" job="ls -alh > /dev/null"
# Ensure an old job is no longer present. Removes any job that is prefixed
# by "#Ansible: an old job" from the crontab
- cron: name="an old job" state=absent
# Creates an entry like "@reboot /some/job.sh"
- cron: name="a job for reboot" special_time=reboot job="/some/job.sh"
# Creates a cron file under /etc/cron.d
- cron: name="yum autoupdate" weekday="2" minute=0 hour=12
user="root" job="YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file=ansible_yum-autoupdate
# Removes a cron file from under /etc/cron.d
- cron: name="yum autoupdate" cron_file=ansible_yum-autoupdate state=absent
'''
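# Illustrative usage sketch (added for clarity; not part of the shipped
# EXAMPLES above). It combines the documented special_time and backup options:
# run a cleanup script once a day and keep a backup of the previous crontab,
# whose path is returned in the backup_file result. The job name and script
# path are made up for illustration.
#
# - cron: name="nightly cleanup" special_time=daily
#         job="/usr/local/bin/cleanup.sh" backup=yes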
import os
import re
import tempfile
import platform
import pipes
CRONCMD = "/usr/bin/crontab"
class CronTabError(Exception):
pass
class CronTab(object):
"""
    CronTab object to read and write a time-based crontab file
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
# select whether we dump additional debug info through syslog
self.syslogging = False
if cron_file:
self.cron_file = '/etc/cron.d/%s' % cron_file
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.cron_file, 'r')
self.lines = f.read().splitlines()
f.close()
except IOError, e:
# cron file does not exist
return
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
lines = out.splitlines()
count = 0
for l in lines:
if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
not re.match( r'# \(/tmp/.*installed on.*\)', l) and
not re.match( r'# \(.*version.*\)', l)):
self.lines.append(l)
count += 1
def log_message(self, message):
if self.syslogging:
syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message)
def is_empty(self):
if len(self.lines) == 0:
return True
else:
return False
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'w')
elif self.cron_file:
fileh = open(self.cron_file, 'w')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
fileh = os.fdopen(filed, 'w')
fileh.write(self.render())
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
def add_job(self, name, job):
# Add the comment
self.lines.append("%s%s" % (self.ansible, name))
# Add the job
self.lines.append("%s" % (job))
def update_job(self, name, job):
return self._update_job(name, job, self.do_add_job)
def do_add_job(self, lines, comment, job):
lines.append(comment)
lines.append("%s" % (job))
def remove_job(self, name):
return self._update_job(name, "", self.do_remove_job)
def do_remove_job(self, lines, comment, job):
return None
def remove_job_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError, e:
# cron file does not exist
return False
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
def find_job(self, name):
comment = None
for l in self.lines:
if comment is not None:
if comment == name:
return [comment, l]
else:
comment = None
elif re.match( r'%s' % self.ansible, l):
comment = re.sub( r'%s' % self.ansible, '', l)
return []
def get_cron_job(self,minute,hour,day,month,weekday,job,special):
if special:
if self.cron_file:
return "@%s %s %s" % (special, self.user, job)
else:
return "@%s %s" % (special, job)
else:
if self.cron_file:
return "%s %s %s %s %s %s %s" % (minute,hour,day,month,weekday,self.user,job)
else:
return "%s %s %s %s %s %s" % (minute,hour,day,month,weekday,job)
return None
def get_jobnames(self):
jobnames = []
for l in self.lines:
if re.match( r'%s' % self.ansible, l):
jobnames.append(re.sub( r'%s' % self.ansible, '', l))
return jobnames
def _update_job(self, name, job, addlinesfunction):
ansiblename = "%s%s" % (self.ansible, name)
newlines = []
comment = None
for l in self.lines:
if comment is not None:
addlinesfunction(newlines, comment, job)
comment = None
elif l == ansiblename:
comment = l
else:
newlines.append(l)
self.lines = newlines
if len(newlines) == 0:
return True
else:
return False # TODO add some more error testing
def render(self):
"""
Render this crontab as it would be in the crontab.
"""
crons = []
for cron in self.lines:
crons.append(cron)
result = '\n'.join(crons)
if result and result[-1] not in ['\n', '\r']:
result += '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
elif platform.system() == 'AIX':
return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
else:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
else:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
#==================================================
def main():
# The following example playbooks:
#
# - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
#
# - name: do the job
# cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
#
# - name: no job
# cron: name="an old job" state=absent
#
# Would produce:
# # Ansible: check dirs
# * * 5,2 * * ls -alh > /dev/null
# # Ansible: do the job
# * * 5,2 * * /some/dir/job.sh
module = AnsibleModule(
argument_spec = dict(
name=dict(required=False),
user=dict(required=False),
job=dict(required=False),
cron_file=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
backup=dict(default=False, type='bool'),
minute=dict(default='*'),
hour=dict(default='*'),
day=dict(aliases=['dom'], default='*'),
month=dict(default='*'),
weekday=dict(aliases=['dow'], default='*'),
reboot=dict(required=False, default=False, type='bool'),
special_time=dict(required=False,
default=None,
choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"],
type='str')
),
supports_check_mode = False,
)
name = module.params['name']
user = module.params['user']
job = module.params['job']
cron_file = module.params['cron_file']
state = module.params['state']
backup = module.params['backup']
minute = module.params['minute']
hour = module.params['hour']
day = module.params['day']
month = module.params['month']
weekday = module.params['weekday']
reboot = module.params['reboot']
special_time = module.params['special_time']
do_install = state == 'present'
changed = False
res_args = dict()
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(022)
crontab = CronTab(module, user, cron_file)
if crontab.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'cron instantiated - name: "%s"' % name)
# --- user input validation ---
if (special_time or reboot) and \
(True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
module.fail_json(msg="You must specify time and date fields or special time.")
if cron_file and do_install:
if not user:
module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
if reboot and special_time:
module.fail_json(msg="reboot and special_time are mutually exclusive")
if name is None and do_install:
module.fail_json(msg="You must specify 'name' to install a new cron job")
if job is None and do_install:
module.fail_json(msg="You must specify 'job' to install a new cron job")
if job and name is None and not do_install:
module.fail_json(msg="You must specify 'name' to remove a cron job")
if reboot:
if special_time:
module.fail_json(msg="reboot and special_time are mutually exclusive")
else:
special_time = "reboot"
# if requested make a backup before making a change
if backup:
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
if crontab.cron_file and not name and not do_install:
changed = crontab.remove_job_file()
module.exit_json(changed=changed,cron_file=cron_file,state=state)
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time)
old_job = crontab.find_job(name)
if do_install:
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
else:
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
res_args = dict(
jobs = crontab.get_jobnames(), changed = changed
)
if changed:
crontab.write()
# retain the backup only if crontab or cron file have changed
if backup:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
# --- should never get here
module.exit_json(msg="Unable to execute cron task.")
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
| -6,757,778,561,725,139,000 | -512,236,534,766,052,600 | 31.64 | 155 | 0.572362 | false |
jiangzhuo/kbengine
|
kbe/src/lib/python/Lib/test/test_bigmem.py
|
123
|
45241
|
"""Bigmem tests - tests for the 32-bit boundary in containers.
These tests try to exercise the 32-bit boundary that is sometimes, if
rarely, exceeded in practice, but almost never tested. They are really only
meaningful on 64-bit builds on machines with a *lot* of memory, but the
tests are always run, usually with very low memory limits to make sure the
tests themselves don't suffer from bitrot. To run them for real, pass a
high memory limit to regrtest, with the -M option.
"""
from test import support
from test.support import bigmemtest, _1G, _2G, _4G
import unittest
import operator
import sys
import functools
# These tests all use one of the bigmemtest decorators to indicate how much
# memory they use and how much memory they need to be even meaningful. The
# decorators take two arguments: a 'memuse' indicator declaring
# (approximate) bytes per size-unit the test will use (at peak usage), and a
# 'minsize' indicator declaring a minimum *useful* size. A test that
# allocates a bytestring to test various operations near the end will have a
# minsize of at least 2Gb (or it wouldn't reach the 32-bit limit, so the
# test wouldn't be very useful) and a memuse of 1 (one byte per size-unit,
# if it allocates only one big string at a time.)
#
# When run with a memory limit set, both decorators skip tests that need
# more memory than available to be meaningful. The precisionbigmemtest will
# always pass minsize as size, even if there is much more memory available.
# The bigmemtest decorator will scale size upward to fill available memory.
#
# Bigmem testing houserules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, and don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEqual, assertIn or similar.
#   It's a lengthy operation and the error message will be utterly useless
# due to its size. To make sure whether a result has the right contents,
# better to use the strip or count methods, or compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes. Anything that probes the 32-bit boundary.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - Despite the bigmemtest decorator, all tests will actually be called
# with a much smaller number too, in the normal test run (5Kb currently.)
# This is so the tests themselves get frequent testing.
# Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
# BEWARE: it seems that one failing test can cause subsequent tests to
# fail as well. I do not know whether it is due to memory fragmentation
# issues, or other specifics of the platform malloc() routine.
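# Illustrative example (added for clarity; not part of the original suite):
# a minimal bigmem test declaring roughly 2 bytes of peak memory per
# size-unit (the source string plus its concatenated copy) and a minimum
# useful size just past the 2G boundary. As described above, it is still
# exercised with a very small size in a normal test run.
class ExampleBigmemUsage(unittest.TestCase):
    @bigmemtest(size=_2G + 10, memuse=2)
    def test_concat_crosses_boundary(self, size):
        s = 'x' * size                      # first allocation: ~size bytes
        t = s + 'y'                         # peak: old string + new copy, ~2 * size
        self.assertEqual(len(t), size + 1)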
ascii_char_size = 1
ucs2_char_size = 2
ucs4_char_size = 4
class BaseStrTest:
def _test_capitalize(self, size):
_ = self.from_latin1
SUBSTR = self.from_latin1(' abc def ghi')
s = _('-') * size + SUBSTR
caps = s.capitalize()
self.assertEqual(caps[-len(SUBSTR):],
SUBSTR.capitalize())
self.assertEqual(caps.lstrip(_('-')), SUBSTR)
@bigmemtest(size=_2G + 10, memuse=1)
def test_center(self, size):
SUBSTR = self.from_latin1(' abc def ghi')
s = SUBSTR.center(size)
self.assertEqual(len(s), size)
lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
if len(s) % 2:
lpadsize += 1
self.assertEqual(s[lpadsize:-rpadsize], SUBSTR)
self.assertEqual(s.strip(), SUBSTR.strip())
@bigmemtest(size=_2G, memuse=2)
def test_count(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('.') * size + SUBSTR
self.assertEqual(s.count(_('.')), size)
s += _('.')
self.assertEqual(s.count(_('.')), size + 1)
self.assertEqual(s.count(_(' ')), 3)
self.assertEqual(s.count(_('i')), 1)
self.assertEqual(s.count(_('j')), 0)
@bigmemtest(size=_2G, memuse=2)
def test_endswith(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('-') * size + SUBSTR
self.assertTrue(s.endswith(SUBSTR))
self.assertTrue(s.endswith(s))
s2 = _('...') + s
self.assertTrue(s2.endswith(s))
self.assertFalse(s.endswith(_('a') + SUBSTR))
self.assertFalse(SUBSTR.endswith(s))
@bigmemtest(size=_2G + 10, memuse=2)
def test_expandtabs(self, size):
_ = self.from_latin1
s = _('-') * size
tabsize = 8
self.assertTrue(s.expandtabs() == s)
del s
slen, remainder = divmod(size, tabsize)
s = _(' \t') * slen
s = s.expandtabs(tabsize)
self.assertEqual(len(s), size - remainder)
self.assertEqual(len(s.strip(_(' '))), 0)
@bigmemtest(size=_2G, memuse=2)
def test_find(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.find(_(' ')), 0)
self.assertEqual(s.find(SUBSTR), 0)
self.assertEqual(s.find(_(' '), sublen), sublen + size)
self.assertEqual(s.find(SUBSTR, len(SUBSTR)), sublen + size)
self.assertEqual(s.find(_('i')), SUBSTR.find(_('i')))
self.assertEqual(s.find(_('i'), sublen),
sublen + size + SUBSTR.find(_('i')))
self.assertEqual(s.find(_('i'), size),
sublen + size + SUBSTR.find(_('i')))
self.assertEqual(s.find(_('j')), -1)
@bigmemtest(size=_2G, memuse=2)
def test_index(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.index(_(' ')), 0)
self.assertEqual(s.index(SUBSTR), 0)
self.assertEqual(s.index(_(' '), sublen), sublen + size)
self.assertEqual(s.index(SUBSTR, sublen), sublen + size)
self.assertEqual(s.index(_('i')), SUBSTR.index(_('i')))
self.assertEqual(s.index(_('i'), sublen),
sublen + size + SUBSTR.index(_('i')))
self.assertEqual(s.index(_('i'), size),
sublen + size + SUBSTR.index(_('i')))
self.assertRaises(ValueError, s.index, _('j'))
@bigmemtest(size=_2G, memuse=2)
def test_isalnum(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('a') * size + SUBSTR
self.assertTrue(s.isalnum())
s += _('.')
self.assertFalse(s.isalnum())
@bigmemtest(size=_2G, memuse=2)
def test_isalpha(self, size):
_ = self.from_latin1
SUBSTR = _('zzzzzzz')
s = _('a') * size + SUBSTR
self.assertTrue(s.isalpha())
s += _('.')
self.assertFalse(s.isalpha())
@bigmemtest(size=_2G, memuse=2)
def test_isdigit(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('9') * size + SUBSTR
self.assertTrue(s.isdigit())
s += _('z')
self.assertFalse(s.isdigit())
@bigmemtest(size=_2G, memuse=2)
def test_islower(self, size):
_ = self.from_latin1
chars = _(''.join(
chr(c) for c in range(255) if not chr(c).isupper()))
repeats = size // len(chars) + 2
s = chars * repeats
self.assertTrue(s.islower())
s += _('A')
self.assertFalse(s.islower())
@bigmemtest(size=_2G, memuse=2)
def test_isspace(self, size):
_ = self.from_latin1
whitespace = _(' \f\n\r\t\v')
repeats = size // len(whitespace) + 2
s = whitespace * repeats
self.assertTrue(s.isspace())
s += _('j')
self.assertFalse(s.isspace())
@bigmemtest(size=_2G, memuse=2)
def test_istitle(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('').join([_('A'), _('a') * size, SUBSTR])
self.assertTrue(s.istitle())
s += _('A')
self.assertTrue(s.istitle())
s += _('aA')
self.assertFalse(s.istitle())
@bigmemtest(size=_2G, memuse=2)
def test_isupper(self, size):
_ = self.from_latin1
chars = _(''.join(
chr(c) for c in range(255) if not chr(c).islower()))
repeats = size // len(chars) + 2
s = chars * repeats
self.assertTrue(s.isupper())
s += _('a')
self.assertFalse(s.isupper())
@bigmemtest(size=_2G, memuse=2)
def test_join(self, size):
_ = self.from_latin1
s = _('A') * size
x = s.join([_('aaaaa'), _('bbbbb')])
self.assertEqual(x.count(_('a')), 5)
self.assertEqual(x.count(_('b')), 5)
self.assertTrue(x.startswith(_('aaaaaA')))
self.assertTrue(x.endswith(_('Abbbbb')))
@bigmemtest(size=_2G + 10, memuse=1)
def test_ljust(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertTrue(s.startswith(SUBSTR + _(' ')))
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
@bigmemtest(size=_2G + 10, memuse=2)
def test_lower(self, size):
_ = self.from_latin1
s = _('A') * size
s = s.lower()
self.assertEqual(len(s), size)
self.assertEqual(s.count(_('a')), size)
@bigmemtest(size=_2G + 10, memuse=1)
def test_lstrip(self, size):
_ = self.from_latin1
SUBSTR = _('abc def ghi')
s = SUBSTR.rjust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.lstrip(), SUBSTR.lstrip())
del s
s = SUBSTR.ljust(size)
self.assertEqual(len(s), size)
# Type-specific optimization
if isinstance(s, (str, bytes)):
stripped = s.lstrip()
self.assertTrue(stripped is s)
@bigmemtest(size=_2G + 10, memuse=2)
def test_replace(self, size):
_ = self.from_latin1
replacement = _('a')
s = _(' ') * size
s = s.replace(_(' '), replacement)
self.assertEqual(len(s), size)
self.assertEqual(s.count(replacement), size)
s = s.replace(replacement, _(' '), size - 4)
self.assertEqual(len(s), size)
self.assertEqual(s.count(replacement), 4)
self.assertEqual(s[-10:], _(' aaaa'))
@bigmemtest(size=_2G, memuse=2)
def test_rfind(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.rfind(_(' ')), sublen + size + SUBSTR.rfind(_(' ')))
self.assertEqual(s.rfind(SUBSTR), sublen + size)
self.assertEqual(s.rfind(_(' '), 0, size), SUBSTR.rfind(_(' ')))
self.assertEqual(s.rfind(SUBSTR, 0, sublen + size), 0)
self.assertEqual(s.rfind(_('i')), sublen + size + SUBSTR.rfind(_('i')))
self.assertEqual(s.rfind(_('i'), 0, sublen), SUBSTR.rfind(_('i')))
self.assertEqual(s.rfind(_('i'), 0, sublen + size),
SUBSTR.rfind(_('i')))
self.assertEqual(s.rfind(_('j')), -1)
@bigmemtest(size=_2G, memuse=2)
def test_rindex(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.rindex(_(' ')),
sublen + size + SUBSTR.rindex(_(' ')))
self.assertEqual(s.rindex(SUBSTR), sublen + size)
self.assertEqual(s.rindex(_(' '), 0, sublen + size - 1),
SUBSTR.rindex(_(' ')))
self.assertEqual(s.rindex(SUBSTR, 0, sublen + size), 0)
self.assertEqual(s.rindex(_('i')),
sublen + size + SUBSTR.rindex(_('i')))
self.assertEqual(s.rindex(_('i'), 0, sublen), SUBSTR.rindex(_('i')))
self.assertEqual(s.rindex(_('i'), 0, sublen + size),
SUBSTR.rindex(_('i')))
self.assertRaises(ValueError, s.rindex, _('j'))
@bigmemtest(size=_2G + 10, memuse=1)
def test_rjust(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertTrue(s.startswith(SUBSTR + _(' ')))
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
@bigmemtest(size=_2G + 10, memuse=1)
def test_rstrip(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.rstrip(), SUBSTR.rstrip())
del s
s = SUBSTR.rjust(size)
self.assertEqual(len(s), size)
# Type-specific optimization
if isinstance(s, (str, bytes)):
stripped = s.rstrip()
self.assertTrue(stripped is s)
# The test takes about size bytes to build a string, and then about
# sqrt(size) substrings of sqrt(size) in size and a list to
# hold sqrt(size) items. It's close but just over 2x size.
@bigmemtest(size=_2G, memuse=2.1)
def test_split_small(self, size):
_ = self.from_latin1
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2)
SUBSTR = _('a') + _(' ') * chunksize
s = SUBSTR * chunksize
l = s.split()
self.assertEqual(len(l), chunksize)
expected = _('a')
for item in l:
self.assertEqual(item, expected)
del l
l = s.split(_('a'))
self.assertEqual(len(l), chunksize + 1)
expected = _(' ') * chunksize
for item in filter(None, l):
self.assertEqual(item, expected)
# Allocates a string of twice size (and briefly two) and a list of
# size. Because of internal affairs, the s.split() call produces a
# list of size times the same one-character string, so we only
# suffer for the list size. (Otherwise, it'd cost another 48 times
# size in bytes!) Nevertheless, a list of size takes
# 8*size bytes.
@bigmemtest(size=_2G + 5, memuse=2 * ascii_char_size + 8)
def test_split_large(self, size):
_ = self.from_latin1
s = _(' a') * size + _(' ')
l = s.split()
self.assertEqual(len(l), size)
self.assertEqual(set(l), set([_('a')]))
del l
l = s.split(_('a'))
self.assertEqual(len(l), size + 1)
self.assertEqual(set(l), set([_(' ')]))
@bigmemtest(size=_2G, memuse=2.1)
def test_splitlines(self, size):
_ = self.from_latin1
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2) // 2
SUBSTR = _(' ') * chunksize + _('\n') + _(' ') * chunksize + _('\r\n')
s = SUBSTR * (chunksize * 2)
l = s.splitlines()
self.assertEqual(len(l), chunksize * 4)
expected = _(' ') * chunksize
for item in l:
self.assertEqual(item, expected)
@bigmemtest(size=_2G, memuse=2)
def test_startswith(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('-') * size + SUBSTR
self.assertTrue(s.startswith(s))
self.assertTrue(s.startswith(_('-') * size))
self.assertFalse(s.startswith(SUBSTR))
@bigmemtest(size=_2G, memuse=1)
def test_strip(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi ')
s = SUBSTR.rjust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
del s
s = SUBSTR.ljust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
def _test_swapcase(self, size):
_ = self.from_latin1
SUBSTR = _("aBcDeFG12.'\xa9\x00")
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.swapcase()
self.assertEqual(len(s), sublen * repeats)
self.assertEqual(s[:sublen * 3], SUBSTR.swapcase() * 3)
self.assertEqual(s[-sublen * 3:], SUBSTR.swapcase() * 3)
def _test_title(self, size):
_ = self.from_latin1
SUBSTR = _('SpaaHAaaAaham')
s = SUBSTR * (size // len(SUBSTR) + 2)
s = s.title()
self.assertTrue(s.startswith((SUBSTR * 3).title()))
self.assertTrue(s.endswith(SUBSTR.lower() * 3))
@bigmemtest(size=_2G, memuse=2)
def test_translate(self, size):
_ = self.from_latin1
SUBSTR = _('aZz.z.Aaz.')
trans = bytes.maketrans(b'.aZ', b'-!$')
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.translate(trans)
self.assertEqual(len(s), repeats * sublen)
self.assertEqual(s[:sublen], SUBSTR.translate(trans))
self.assertEqual(s[-sublen:], SUBSTR.translate(trans))
self.assertEqual(s.count(_('.')), 0)
self.assertEqual(s.count(_('!')), repeats * 2)
self.assertEqual(s.count(_('z')), repeats * 3)
@bigmemtest(size=_2G + 5, memuse=2)
def test_upper(self, size):
_ = self.from_latin1
s = _('a') * size
s = s.upper()
self.assertEqual(len(s), size)
self.assertEqual(s.count(_('A')), size)
@bigmemtest(size=_2G + 20, memuse=1)
def test_zfill(self, size):
_ = self.from_latin1
SUBSTR = _('-568324723598234')
s = SUBSTR.zfill(size)
self.assertTrue(s.endswith(_('0') + SUBSTR[1:]))
self.assertTrue(s.startswith(_('-0')))
self.assertEqual(len(s), size)
self.assertEqual(s.count(_('0')), size - len(SUBSTR))
# This test is meaningful even with size < 2G, as long as the
# doubled string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(size=_1G + 2, memuse=3)
def test_concat(self, size):
_ = self.from_latin1
s = _('.') * size
self.assertEqual(len(s), size)
s = s + s
self.assertEqual(len(s), size * 2)
self.assertEqual(s.count(_('.')), size * 2)
# This test is meaningful even with size < 2G, as long as the
# repeated string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(size=_1G + 2, memuse=3)
def test_repeat(self, size):
_ = self.from_latin1
s = _('.') * size
self.assertEqual(len(s), size)
s = s * 2
self.assertEqual(len(s), size * 2)
self.assertEqual(s.count(_('.')), size * 2)
@bigmemtest(size=_2G + 20, memuse=2)
def test_slice_and_getitem(self, size):
_ = self.from_latin1
SUBSTR = _('0123456789')
sublen = len(SUBSTR)
s = SUBSTR * (size // sublen)
stepsize = len(s) // 100
stepsize = stepsize - (stepsize % sublen)
for i in range(0, len(s) - stepsize, stepsize):
self.assertEqual(s[i], SUBSTR[0])
self.assertEqual(s[i:i + sublen], SUBSTR)
self.assertEqual(s[i:i + sublen:2], SUBSTR[::2])
if i > 0:
self.assertEqual(s[i + sublen - 1:i - 1:-3],
SUBSTR[sublen::-3])
# Make sure we do some slicing and indexing near the end of the
# string, too.
self.assertEqual(s[len(s) - 1], SUBSTR[-1])
self.assertEqual(s[-1], SUBSTR[-1])
self.assertEqual(s[len(s) - 10], SUBSTR[0])
self.assertEqual(s[-sublen], SUBSTR[0])
self.assertEqual(s[len(s):], _(''))
self.assertEqual(s[len(s) - 1:], SUBSTR[-1:])
self.assertEqual(s[-1:], SUBSTR[-1:])
self.assertEqual(s[len(s) - sublen:], SUBSTR)
self.assertEqual(s[-sublen:], SUBSTR)
self.assertEqual(len(s[:]), len(s))
self.assertEqual(len(s[:len(s) - 5]), len(s) - 5)
self.assertEqual(len(s[5:-5]), len(s) - 10)
self.assertRaises(IndexError, operator.getitem, s, len(s))
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1)
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31)
@bigmemtest(size=_2G, memuse=2)
def test_contains(self, size):
_ = self.from_latin1
SUBSTR = _('0123456789')
edge = _('-') * (size // 2)
s = _('').join([edge, SUBSTR, edge])
del edge
self.assertTrue(SUBSTR in s)
self.assertFalse(SUBSTR * 2 in s)
self.assertTrue(_('-') in s)
self.assertFalse(_('a') in s)
s += _('a')
self.assertTrue(_('a') in s)
@bigmemtest(size=_2G + 10, memuse=2)
def test_compare(self, size):
_ = self.from_latin1
s1 = _('-') * size
s2 = _('-') * size
self.assertTrue(s1 == s2)
del s2
s2 = s1 + _('a')
self.assertFalse(s1 == s2)
del s2
s2 = _('.') * size
self.assertFalse(s1 == s2)
@bigmemtest(size=_2G + 10, memuse=1)
def test_hash(self, size):
# Not sure if we can do any meaningful tests here... Even if we
# start relying on the exact algorithm used, the result will be
# different depending on the size of the C 'long int'. Even this
# test is dodgy (there's no *guarantee* that the two things should
# have a different hash, even if they, in the current
# implementation, almost always do.)
_ = self.from_latin1
s = _('\x00') * size
h1 = hash(s)
del s
s = _('\x00') * (size + 1)
self.assertNotEqual(h1, hash(s))
class StrTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return s
def basic_encode_test(self, size, enc, c='.', expectedsize=None):
if expectedsize is None:
expectedsize = size
try:
s = c * size
self.assertEqual(len(s.encode(enc)), expectedsize)
finally:
s = None
def setUp(self):
# HACK: adjust memory use of tests inherited from BaseStrTest
# according to character size.
self._adjusted = {}
for name in dir(BaseStrTest):
if not name.startswith('test_'):
continue
meth = getattr(type(self), name)
try:
memuse = meth.memuse
except AttributeError:
continue
meth.memuse = ascii_char_size * memuse
self._adjusted[name] = memuse
def tearDown(self):
for name, memuse in self._adjusted.items():
getattr(type(self), name).memuse = memuse
@bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
def test_capitalize(self, size):
self._test_capitalize(size)
@bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
def test_title(self, size):
self._test_title(size)
@bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
def test_swapcase(self, size):
self._test_swapcase(size)
# Many codecs convert to the legacy representation first, explaining
# why we add 'ucs4_char_size' to the 'memuse' below.
@bigmemtest(size=_2G + 2, memuse=ascii_char_size + 1)
def test_encode(self, size):
return self.basic_encode_test(size, 'utf-8')
@bigmemtest(size=_4G // 6 + 2, memuse=ascii_char_size + ucs4_char_size + 1)
def test_encode_raw_unicode_escape(self, size):
try:
return self.basic_encode_test(size, 'raw_unicode_escape')
except MemoryError:
pass # acceptable on 32-bit
@bigmemtest(size=_4G // 5 + 70, memuse=ascii_char_size + ucs4_char_size + 1)
def test_encode_utf7(self, size):
try:
return self.basic_encode_test(size, 'utf7')
except MemoryError:
pass # acceptable on 32-bit
@bigmemtest(size=_4G // 4 + 5, memuse=ascii_char_size + ucs4_char_size + 4)
def test_encode_utf32(self, size):
try:
return self.basic_encode_test(size, 'utf32', expectedsize=4 * size + 4)
except MemoryError:
pass # acceptable on 32-bit
@bigmemtest(size=_2G - 1, memuse=ascii_char_size + 1)
def test_encode_ascii(self, size):
return self.basic_encode_test(size, 'ascii', c='A')
# str % (...) uses a Py_UCS4 intermediate representation
@bigmemtest(size=_2G + 10, memuse=ascii_char_size * 2 + ucs4_char_size)
def test_format(self, size):
s = '-' * size
sf = '%s' % (s,)
self.assertTrue(s == sf)
del sf
sf = '..%s..' % (s,)
self.assertEqual(len(sf), len(s) + 4)
self.assertTrue(sf.startswith('..-'))
self.assertTrue(sf.endswith('-..'))
del s, sf
size //= 2
edge = '-' * size
s = ''.join([edge, '%s', edge])
del edge
s = s % '...'
self.assertEqual(len(s), size * 2 + 3)
self.assertEqual(s.count('.'), 3)
self.assertEqual(s.count('-'), size * 2)
@bigmemtest(size=_2G + 10, memuse=ascii_char_size * 2)
def test_repr_small(self, size):
s = '-' * size
s = repr(s)
self.assertEqual(len(s), size + 2)
self.assertEqual(s[0], "'")
self.assertEqual(s[-1], "'")
self.assertEqual(s.count('-'), size)
del s
# repr() will create a string four times as large as this 'binary
# string', but we don't want to allocate much more than twice
# size in total. (We do extra testing in test_repr_large())
size = size // 5 * 2
s = '\x00' * size
s = repr(s)
self.assertEqual(len(s), size * 4 + 2)
self.assertEqual(s[0], "'")
self.assertEqual(s[-1], "'")
self.assertEqual(s.count('\\'), size)
self.assertEqual(s.count('0'), size * 2)
@bigmemtest(size=_2G + 10, memuse=ascii_char_size * 5)
def test_repr_large(self, size):
s = '\x00' * size
s = repr(s)
self.assertEqual(len(s), size * 4 + 2)
self.assertEqual(s[0], "'")
self.assertEqual(s[-1], "'")
self.assertEqual(s.count('\\'), size)
self.assertEqual(s.count('0'), size * 2)
# ascii() calls encode('ascii', 'backslashreplace'), which itself
# creates a temporary Py_UNICODE representation in addition to the
# original (Py_UCS2) one
# There's also some overallocation when resizing the ascii() result
# that isn't taken into account here.
@bigmemtest(size=_2G // 5 + 1, memuse=ucs2_char_size +
ucs4_char_size + ascii_char_size * 6)
def test_unicode_repr(self, size):
# Use an assigned, but not printable code point.
# It is in the range of the low surrogates \uDC00-\uDFFF.
char = "\uDCBA"
s = char * size
try:
for f in (repr, ascii):
r = f(s)
self.assertEqual(len(r), 2 + (len(f(char)) - 2) * size)
self.assertTrue(r.endswith(r"\udcba'"), r[-10:])
r = None
finally:
r = s = None
@bigmemtest(size=_2G // 5 + 1, memuse=ucs4_char_size * 2 + ascii_char_size * 10)
def test_unicode_repr_wide(self, size):
char = "\U0001DCBA"
s = char * size
try:
for f in (repr, ascii):
r = f(s)
self.assertEqual(len(r), 2 + (len(f(char)) - 2) * size)
self.assertTrue(r.endswith(r"\U0001dcba'"), r[-12:])
r = None
finally:
r = s = None
    # The original test_translate is overridden here, so as to get the
# correct size estimate: str.translate() uses an intermediate Py_UCS4
# representation.
@bigmemtest(size=_2G, memuse=ascii_char_size * 2 + ucs4_char_size)
def test_translate(self, size):
_ = self.from_latin1
SUBSTR = _('aZz.z.Aaz.')
trans = {
ord(_('.')): _('-'),
ord(_('a')): _('!'),
ord(_('Z')): _('$'),
}
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.translate(trans)
self.assertEqual(len(s), repeats * sublen)
self.assertEqual(s[:sublen], SUBSTR.translate(trans))
self.assertEqual(s[-sublen:], SUBSTR.translate(trans))
self.assertEqual(s.count(_('.')), 0)
self.assertEqual(s.count(_('!')), repeats * 2)
self.assertEqual(s.count(_('z')), repeats * 3)
class BytesTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return s.encode("latin-1")
@bigmemtest(size=_2G + 2, memuse=1 + ascii_char_size)
def test_decode(self, size):
s = self.from_latin1('.') * size
self.assertEqual(len(s.decode('utf-8')), size)
@bigmemtest(size=_2G, memuse=2)
def test_capitalize(self, size):
self._test_capitalize(size)
@bigmemtest(size=_2G, memuse=2)
def test_title(self, size):
self._test_title(size)
@bigmemtest(size=_2G, memuse=2)
def test_swapcase(self, size):
self._test_swapcase(size)
class BytearrayTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return bytearray(s.encode("latin-1"))
@bigmemtest(size=_2G + 2, memuse=1 + ascii_char_size)
def test_decode(self, size):
s = self.from_latin1('.') * size
self.assertEqual(len(s.decode('utf-8')), size)
@bigmemtest(size=_2G, memuse=2)
def test_capitalize(self, size):
self._test_capitalize(size)
@bigmemtest(size=_2G, memuse=2)
def test_title(self, size):
self._test_title(size)
@bigmemtest(size=_2G, memuse=2)
def test_swapcase(self, size):
self._test_swapcase(size)
test_hash = None
test_split_large = None
class TupleTest(unittest.TestCase):
# Tuples have a small, fixed-sized head and an array of pointers to
# data. Since we're testing 64-bit addressing, we can assume that the
# pointers are 8 bytes, and that thus that the tuples take up 8 bytes
# per size.
# As a side-effect of testing long tuples, these tests happen to test
# having more than 2<<31 references to any given object. Hence the
# use of different types of objects as contents in different tests.
@bigmemtest(size=_2G + 2, memuse=16)
def test_compare(self, size):
t1 = ('',) * size
t2 = ('',) * size
self.assertTrue(t1 == t2)
del t2
t2 = ('',) * (size + 1)
self.assertFalse(t1 == t2)
del t2
t2 = (1,) * size
self.assertFalse(t1 == t2)
# Test concatenating into a single tuple of more than 2G in length,
# and concatenating a tuple of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_concat_test(self, size):
t = ((),) * size
self.assertEqual(len(t), size)
t = t + t
self.assertEqual(len(t), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_concat_test(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_concat_test(size)
@bigmemtest(size=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
t = (1, 2, 3, 4, 5) * size
self.assertEqual(len(t), size * 5)
self.assertTrue(5 in t)
self.assertFalse((1, 2, 3, 4, 5) in t)
self.assertFalse(0 in t)
@bigmemtest(size=_2G + 10, memuse=8)
def test_hash(self, size):
t1 = (0,) * size
h1 = hash(t1)
del t1
t2 = (0,) * (size + 1)
self.assertFalse(h1 == hash(t2))
@bigmemtest(size=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
t = (None,) * size
self.assertEqual(len(t), size)
self.assertEqual(t[-1], None)
self.assertEqual(t[5], None)
self.assertEqual(t[size - 1], None)
self.assertRaises(IndexError, operator.getitem, t, size)
self.assertEqual(t[:5], (None,) * 5)
self.assertEqual(t[-5:], (None,) * 5)
self.assertEqual(t[20:25], (None,) * 5)
self.assertEqual(t[-25:-20], (None,) * 5)
self.assertEqual(t[size - 5:], (None,) * 5)
self.assertEqual(t[size - 5:size], (None,) * 5)
self.assertEqual(t[size - 6:size - 2], (None,) * 4)
self.assertEqual(t[size:size], ())
self.assertEqual(t[size:size+5], ())
# Like test_concat, split in two.
def basic_test_repeat(self, size):
t = ('',) * size
self.assertEqual(len(t), size)
t = t * 2
self.assertEqual(len(t), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_1G - 1, memuse=12)
def test_repeat_large_2(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_1G - 1, memuse=9)
def test_from_2G_generator(self, size):
self.skipTest("test needs much more memory than advertised, see issue5438")
try:
t = tuple(range(size))
except MemoryError:
pass # acceptable on 32-bit
else:
count = 0
for item in t:
self.assertEqual(item, count)
count += 1
self.assertEqual(count, size)
@bigmemtest(size=_1G - 25, memuse=9)
def test_from_almost_2G_generator(self, size):
self.skipTest("test needs much more memory than advertised, see issue5438")
try:
t = tuple(range(size))
count = 0
for item in t:
self.assertEqual(item, count)
count += 1
self.assertEqual(count, size)
except MemoryError:
pass # acceptable, expected on 32-bit
# Like test_concat, split in two.
def basic_test_repr(self, size):
t = (0,) * size
s = repr(t)
# The repr of a tuple of 0's is exactly three times the tuple length.
self.assertEqual(len(s), size * 3)
self.assertEqual(s[:5], '(0, 0')
self.assertEqual(s[-5:], '0, 0)')
self.assertEqual(s.count('0'), size)
@bigmemtest(size=_2G // 3 + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(size=_2G + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_large(self, size):
return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
# Like tuples, lists have a small, fixed-sized head and an array of
# pointers to data, so 8 bytes per size. Also like tuples, we make the
# lists hold references to various objects to test their refcount
# limits.
@bigmemtest(size=_2G + 2, memuse=16)
def test_compare(self, size):
l1 = [''] * size
l2 = [''] * size
self.assertTrue(l1 == l2)
del l2
l2 = [''] * (size + 1)
self.assertFalse(l1 == l2)
del l2
l2 = [2] * size
self.assertFalse(l1 == l2)
# Test concatenating into a single list of more than 2G in length,
# and concatenating a list of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_test_concat(self, size):
l = [[]] * size
self.assertEqual(len(l), size)
l = l + l
self.assertEqual(len(l), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_test_concat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_test_concat(size)
def basic_test_inplace_concat(self, size):
l = [sys.stdout] * size
l += l
self.assertEqual(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_inplace_concat_small(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_inplace_concat_large(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(size=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEqual(len(l), size * 5)
self.assertTrue(5 in l)
self.assertFalse([1, 2, 3, 4, 5] in l)
self.assertFalse(0 in l)
@bigmemtest(size=_2G + 10, memuse=8)
def test_hash(self, size):
l = [0] * size
self.assertRaises(TypeError, hash, l)
@bigmemtest(size=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
l = [None] * size
self.assertEqual(len(l), size)
self.assertEqual(l[-1], None)
self.assertEqual(l[5], None)
self.assertEqual(l[size - 1], None)
self.assertRaises(IndexError, operator.getitem, l, size)
self.assertEqual(l[:5], [None] * 5)
self.assertEqual(l[-5:], [None] * 5)
self.assertEqual(l[20:25], [None] * 5)
self.assertEqual(l[-25:-20], [None] * 5)
self.assertEqual(l[size - 5:], [None] * 5)
self.assertEqual(l[size - 5:size], [None] * 5)
self.assertEqual(l[size - 6:size - 2], [None] * 4)
self.assertEqual(l[size:size], [])
self.assertEqual(l[size:size+5], [])
l[size - 2] = 5
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], [None, 5, None])
self.assertEqual(l.count(5), 1)
self.assertRaises(IndexError, operator.setitem, l, size, 6)
self.assertEqual(len(l), size)
l[size - 7:] = [1, 2, 3, 4, 5]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[-7:], [None, None, 1, 2, 3, 4, 5])
l[:7] = [1, 2, 3, 4, 5]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[:7], [1, 2, 3, 4, 5, None, None])
del l[size - 1]
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[-1], 4)
del l[-2:]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[-1], 2)
del l[0]
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[0], 2)
del l[:2]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[0], 4)
# Like test_concat, split in two.
def basic_test_repeat(self, size):
l = [] * size
self.assertFalse(l)
l = [''] * size
self.assertEqual(len(l), size)
l = l * 2
self.assertEqual(len(l), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
def basic_test_inplace_repeat(self, size):
l = ['']
l *= size
self.assertEqual(len(l), size)
self.assertTrue(l[0] is l[-1])
del l
l = [''] * size
l *= 2
self.assertEqual(len(l), size * 2)
self.assertTrue(l[size - 1] is l[-1])
@bigmemtest(size=_2G // 2 + 2, memuse=16)
def test_inplace_repeat_small(self, size):
return self.basic_test_inplace_repeat(size)
@bigmemtest(size=_2G + 2, memuse=16)
def test_inplace_repeat_large(self, size):
return self.basic_test_inplace_repeat(size)
def basic_test_repr(self, size):
l = [0] * size
s = repr(l)
# The repr of a list of 0's is exactly three times the list length.
self.assertEqual(len(s), size * 3)
self.assertEqual(s[:5], '[0, 0')
self.assertEqual(s[-5:], '0, 0]')
self.assertEqual(s.count('0'), size)
@bigmemtest(size=_2G // 3 + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(size=_2G + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_large(self, size):
return self.basic_test_repr(size)
# list overallocates ~1/8th of the total size (on first expansion) so
# the single list.append call puts memuse at 9 bytes per size.
@bigmemtest(size=_2G, memuse=9)
def test_append(self, size):
l = [object()] * size
l.append(object())
self.assertEqual(len(l), size+1)
self.assertTrue(l[-3] is l[-2])
self.assertFalse(l[-2] is l[-1])
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_count(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEqual(l.count(1), size)
self.assertEqual(l.count("1"), 0)
def basic_test_extend(self, size):
l = [object] * size
l.extend(l)
self.assertEqual(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(size=_2G // 2 + 2, memuse=16)
def test_extend_small(self, size):
return self.basic_test_extend(size)
@bigmemtest(size=_2G + 2, memuse=16)
def test_extend_large(self, size):
return self.basic_test_extend(size)
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_index(self, size):
l = [1, 2, 3, 4, 5] * size
size *= 5
self.assertEqual(l.index(1), 0)
self.assertEqual(l.index(5, size - 5), size - 1)
self.assertEqual(l.index(5, size - 5, size), size - 1)
self.assertRaises(ValueError, l.index, 1, size - 4, size)
self.assertRaises(ValueError, l.index, 6)
    # This test suffers from overallocation, just like test_append.
@bigmemtest(size=_2G + 10, memuse=9)
def test_insert(self, size):
l = [1.0] * size
l.insert(size - 1, "A")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], [1.0, "A", 1.0])
l.insert(size + 1, "B")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], ["A", 1.0, "B"])
l.insert(1, "C")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[:3], [1.0, "C", 1.0])
self.assertEqual(l[size - 3:], ["A", 1.0, "B"])
@bigmemtest(size=_2G // 5 + 4, memuse=8 * 5)
def test_pop(self, size):
l = ["a", "b", "c", "d", "e"] * size
size *= 5
self.assertEqual(len(l), size)
item = l.pop()
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, "e")
self.assertEqual(l[-2:], ["c", "d"])
item = l.pop(0)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, "a")
self.assertEqual(l[:2], ["b", "c"])
item = l.pop(size - 2)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, "c")
self.assertEqual(l[-2:], ["b", "d"])
@bigmemtest(size=_2G + 10, memuse=8)
def test_remove(self, size):
l = [10] * size
self.assertEqual(len(l), size)
l.remove(10)
size -= 1
self.assertEqual(len(l), size)
# Because of the earlier l.remove(), this append doesn't trigger
# a resize.
l.append(5)
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-2:], [10, 5])
l.remove(5)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[-2:], [10, 10])
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_reverse(self, size):
l = [1, 2, 3, 4, 5] * size
l.reverse()
self.assertEqual(len(l), size * 5)
self.assertEqual(l[-5:], [5, 4, 3, 2, 1])
self.assertEqual(l[:5], [5, 4, 3, 2, 1])
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_sort(self, size):
l = [1, 2, 3, 4, 5] * size
l.sort()
self.assertEqual(len(l), size * 5)
self.assertEqual(l.count(1), size)
self.assertEqual(l[:10], [1] * 10)
self.assertEqual(l[-10:], [5] * 10)
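# Editorial aside, not part of the original test file: the memuse figures in
# TupleTest and ListTest assume a 64-bit CPython build, where every tuple or
# list slot is a single 8-byte pointer.  The helper below is a minimal sketch
# of how that assumption could be sanity-checked; its name and sample size
# are arbitrary and the tests never call it.
def _approx_bytes_per_list_slot(n=1000000):
    """Return the approximate per-slot cost of a list of n elements."""
    empty = sys.getsizeof([])
    full = sys.getsizeof([None] * n)
    # Roughly 8.0 on 64-bit CPython; append-driven growth overallocates,
    # which is why test_append budgets memuse=9 rather than 8.
    return (full - empty) / n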
def test_main():
support.run_unittest(StrTest, BytesTest, BytearrayTest,
TupleTest, ListTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
support.set_memlimit(sys.argv[1])
test_main()
|
lgpl-3.0
| 3,965,294,777,578,926,000 | 5,322,184,264,473,852,000 | 34.991249 | 84 | 0.559249 | false |
shurihell/testasia
|
lms/djangoapps/instructor/tests/utils.py
|
121
|
2732
|
"""
Utilities for instructor unit tests
"""
import datetime
import json
import random
from django.utils.timezone import utc
from util.date_utils import get_default_time_display
class FakeInfo(object):
"""Parent class for faking objects used in tests"""
FEATURES = []
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, u'expected')
def to_dict(self):
""" Returns a dict representation of the object """
return {key: getattr(self, key) for key in self.FEATURES}
class FakeContentTask(FakeInfo):
""" Fake task info needed for email content list """
FEATURES = [
'task_input',
'task_output',
'requester',
]
def __init__(self, email_id, num_sent, num_failed, sent_to):
super(FakeContentTask, self).__init__()
self.task_input = {'email_id': email_id, 'to_option': sent_to}
self.task_input = json.dumps(self.task_input)
self.task_output = {'succeeded': num_sent, 'failed': num_failed}
self.task_output = json.dumps(self.task_output)
self.requester = 'expected'
def make_invalid_input(self):
"""Corrupt the task input field to test errors"""
self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
""" Corresponding fake email for a fake task """
FEATURES = [
'subject',
'html_message',
'id',
'created',
]
def __init__(self, email_id):
super(FakeEmail, self).__init__()
self.id = unicode(email_id) # pylint: disable=invalid-name
        # Select a random date for the created field
year = random.randint(1950, 2000)
month = random.randint(1, 12)
day = random.randint(1, 28)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
self.created = datetime.datetime(year, month, day, hour, minute, tzinfo=utc)
class FakeEmailInfo(FakeInfo):
""" Fake email information object """
FEATURES = [
u'created',
u'sent_to',
u'email',
u'number_sent',
u'requester',
]
EMAIL_FEATURES = [
u'subject',
u'html_message',
u'id'
]
def __init__(self, fake_email, num_sent, num_failed):
super(FakeEmailInfo, self).__init__()
self.created = get_default_time_display(fake_email.created)
number_sent = str(num_sent) + ' sent'
if num_failed > 0:
number_sent += ', ' + str(num_failed) + " failed"
self.number_sent = number_sent
fake_email_dict = fake_email.to_dict()
self.email = {feature: fake_email_dict[feature] for feature in self.EMAIL_FEATURES}
self.requester = u'expected'
|
agpl-3.0
| 2,188,451,805,396,905,700 | 3,474,870,084,684,307,500 | 28.376344 | 91 | 0.593704 | false |
nmartensen/pandas
|
asv_bench/benchmarks/gil.py
|
7
|
11003
|
from .pandas_vb_common import *
from pandas.core.algorithms import take_1d
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from pandas._libs import algos
except ImportError:
from pandas import algos
try:
from pandas.util.testing import test_parallel
have_real_test_parallel = True
except ImportError:
have_real_test_parallel = False
    def test_parallel(num_threads=1):
        # Fallback no-op decorator: without pandas.util.testing.test_parallel
        # the decorated benchmark bodies simply run single-threaded.
        def wrapper(fname):
            return fname
        return wrapper
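# Editorial sketch, not part of the original benchmark file: assuming the real
# pandas.util.testing.test_parallel runs the wrapped function concurrently in
# num_threads threads, a minimal thread-spawning stand-in could look like the
# helper below.  It ignores the kwargs_list option used further down and is
# not called by any benchmark in this file.
def _threaded_test_parallel(num_threads=2):
    import threading
    def decorator(fn):
        def inner(*args, **kwargs):
            threads = [threading.Thread(target=fn, args=args, kwargs=kwargs)
                       for _ in range(num_threads)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
        return inner
    return decorator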
class NoGilGroupby(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
np.random.seed(1234)
self.size = 2 ** 22
self.ngroups = 100
self.data = Series(np.random.randint(0, self.ngroups, size=self.size))
if (not have_real_test_parallel):
raise NotImplementedError
@test_parallel(num_threads=2)
def _pg2_count(self):
self.df.groupby('key')['data'].count()
def time_count_2(self):
self._pg2_count()
@test_parallel(num_threads=2)
def _pg2_last(self):
self.df.groupby('key')['data'].last()
def time_last_2(self):
self._pg2_last()
@test_parallel(num_threads=2)
def _pg2_max(self):
self.df.groupby('key')['data'].max()
def time_max_2(self):
self._pg2_max()
@test_parallel(num_threads=2)
def _pg2_mean(self):
self.df.groupby('key')['data'].mean()
def time_mean_2(self):
self._pg2_mean()
@test_parallel(num_threads=2)
def _pg2_min(self):
self.df.groupby('key')['data'].min()
def time_min_2(self):
self._pg2_min()
@test_parallel(num_threads=2)
def _pg2_prod(self):
self.df.groupby('key')['data'].prod()
def time_prod_2(self):
self._pg2_prod()
@test_parallel(num_threads=2)
def _pg2_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_2(self):
self._pg2_sum()
@test_parallel(num_threads=4)
def _pg4_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_4(self):
self._pg4_sum()
def time_sum_4_notp(self):
for i in range(4):
self.df.groupby('key')['data'].sum()
def _f_sum(self):
self.df.groupby('key')['data'].sum()
@test_parallel(num_threads=8)
def _pg8_sum(self):
self._f_sum()
def time_sum_8(self):
self._pg8_sum()
def time_sum_8_notp(self):
for i in range(8):
self._f_sum()
@test_parallel(num_threads=2)
def _pg2_var(self):
self.df.groupby('key')['data'].var()
def time_var_2(self):
self._pg2_var()
# get groups
def _groups(self):
self.data.groupby(self.data).groups
@test_parallel(num_threads=2)
def _pg2_groups(self):
self._groups()
def time_groups_2(self):
self._pg2_groups()
@test_parallel(num_threads=4)
def _pg4_groups(self):
self._groups()
def time_groups_4(self):
self._pg4_groups()
@test_parallel(num_threads=8)
def _pg8_groups(self):
self._groups()
def time_groups_8(self):
self._pg8_groups()
class nogil_take1d_float64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
    def time_nogil_take1d_float64(self):
        self.take_1d_pg2_float64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_take1d_int64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
    def time_nogil_take1d_int64(self):
        self.take_1d_pg2_int64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_kth_smallest(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.N = 10000000
self.k = 500000
self.a = np.random.randn(self.N)
self.b = self.a.copy()
self.kwargs_list = [{'arr': self.a}, {'arr': self.b}]
def time_nogil_kth_smallest(self):
@test_parallel(num_threads=2, kwargs_list=self.kwargs_list)
def run(arr):
algos.kth_smallest(arr, self.k)
run()
class nogil_datetime_fields(object):
goal_time = 0.2
def setup(self):
self.N = 100000000
self.dti = pd.date_range('1900-01-01', periods=self.N, freq='T')
self.period = self.dti.to_period('D')
if (not have_real_test_parallel):
raise NotImplementedError
def time_datetime_field_year(self):
@test_parallel(num_threads=2)
def run(dti):
dti.year
run(self.dti)
def time_datetime_field_day(self):
@test_parallel(num_threads=2)
def run(dti):
dti.day
run(self.dti)
def time_datetime_field_daysinmonth(self):
@test_parallel(num_threads=2)
def run(dti):
dti.days_in_month
run(self.dti)
def time_datetime_field_normalize(self):
@test_parallel(num_threads=2)
def run(dti):
dti.normalize()
run(self.dti)
def time_datetime_to_period(self):
@test_parallel(num_threads=2)
def run(dti):
dti.to_period('S')
run(self.dti)
def time_period_to_datetime(self):
@test_parallel(num_threads=2)
def run(period):
period.to_timestamp()
run(self.period)
class nogil_rolling_algos_slow(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(100000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_median(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_median(arr, win)
run(self.arr, self.win)
class nogil_rolling_algos_fast(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(1000000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_mean(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_mean(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_min(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_min(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_max(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_max(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_var(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_var(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_skew(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_skew(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_kurt(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_kurt(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_std(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_std(arr, win)
run(self.arr, self.win)
class nogil_read_csv(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
# Using the values
self.df = DataFrame(np.random.randn(10000, 50))
self.df.to_csv('__test__.csv')
self.rng = date_range('1/1/2000', periods=10000)
self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng)
self.df_date_time.to_csv('__test_datetime__.csv')
self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object'))
self.df_object.to_csv('__test_object__.csv')
def create_cols(self, name):
return [('%s%03d' % (name, i)) for i in range(5)]
@test_parallel(num_threads=2)
def pg_read_csv(self):
read_csv('__test__.csv', sep=',', header=None, float_precision=None)
def time_read_csv(self):
self.pg_read_csv()
@test_parallel(num_threads=2)
def pg_read_csv_object(self):
read_csv('__test_object__.csv', sep=',')
def time_read_csv_object(self):
self.pg_read_csv_object()
@test_parallel(num_threads=2)
def pg_read_csv_datetime(self):
read_csv('__test_datetime__.csv', sep=',', header=None)
def time_read_csv_datetime(self):
self.pg_read_csv_datetime()
class nogil_factorize(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.strings = tm.makeStringIndex(100000)
def factorize_strings(self):
pd.factorize(self.strings)
@test_parallel(num_threads=4)
def _pg_factorize_strings_4(self):
self.factorize_strings()
def time_factorize_strings_4(self):
for i in range(2):
self._pg_factorize_strings_4()
@test_parallel(num_threads=2)
def _pg_factorize_strings_2(self):
self.factorize_strings()
def time_factorize_strings_2(self):
for i in range(4):
self._pg_factorize_strings_2()
def time_factorize_strings(self):
for i in range(8):
self.factorize_strings()
|
bsd-3-clause
| 7,764,923,928,491,081,000 | 1,441,491,154,216,273,400 | 25.07346 | 121 | 0.583205 | false |
SophieIPP/openfisca-france
|
openfisca_france/model/prelevements_obligatoires/prelevements_sociaux/cotisations_sociales/travail_fonction_publique.py
|
1
|
13483
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
from numpy import minimum as min_
from ....base import * # noqa analysis:ignore
from .base import apply_bareme_for_relevant_type_sal
class allocations_temporaires_invalidite(Variable):
column = FloatCol
entity_class = Individus
label = u"Allocations temporaires d'invalidité (ATI, fonction publique et collectivités locales)"
    # employer-paid, non-contributory
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales_public = simulation.calculate('assiette_cotisations_sociales_public', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
base = assiette_cotisations_sociales_public
cotisation_etat = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "ati",
base = base,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
cotisation_collectivites_locales = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "atiacl",
base = base,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, cotisation_etat + cotisation_collectivites_locales
class assiette_cotisations_sociales_public(Variable):
column = FloatCol
entity_class = Individus
label = u"Assiette des cotisations sociales des agents titulaires de la fonction publique"
    # TODO: handle overtime hours (heures supplémentaires)
def function(self, simulation, period):
remuneration_principale = simulation.calculate('remuneration_principale', period)
# primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
# indemnite_residence = simulation.calculate('indemnite_residence', period)
type_sal = simulation.calculate('type_sal', period)
public = (type_sal >= 2)
# titulaire = (type_sal >= 2) * (type_sal <= 5)
assiette = public * (
remuneration_principale
# + not_(titulaire) * (indemnite_residence + primes_fonction_publique)
)
return period, assiette
# The SFT is included in the base (assiette) for the CSG, the RAFP, the exceptional solidarity contribution and the payroll tax
# Bonuses, including residence allowances, are treated the same way as the SFT
# Benefits in kind: exceptional solidarity contribution, RAFP, CSG, CRDS.
class contribution_exceptionnelle_solidarite(Variable):
column = FloatCol
entity_class = Individus
label = u"Cotisation exceptionnelle au fonds de solidarité (salarié)"
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period)
hsup = simulation.calculate('hsup', period)
type_sal = simulation.calculate('type_sal', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
rafp_salarie = simulation.calculate('rafp_salarie', period)
pension_civile_salarie = simulation.calculate('pension_civile_salarie', period)
cotisations_salariales_contributives = simulation.calculate('cotisations_salariales_contributives', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
salaire_de_base = simulation.calculate('salaire_de_base', period)
_P = simulation.legislation_at(period.start)
seuil_assuj_fds = seuil_fds(_P)
assujettis = (
(type_sal == CAT['public_titulaire_etat']) +
(type_sal == CAT['public_titulaire_territoriale']) +
(type_sal == CAT['public_titulaire_hospitaliere']) +
(type_sal == CAT['public_non_titulaire'])
) * (
(traitement_indiciaire_brut + salaire_de_base - hsup) > seuil_assuj_fds
)
        # TODO: check the contribution base (assiette); see IPP
cotisation = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_salarie,
bareme_name = "excep_solidarite",
base = assujettis * min_(
(
traitement_indiciaire_brut + salaire_de_base - hsup + indemnite_residence + rafp_salarie +
pension_civile_salarie +
primes_fonction_publique +
(type_sal == CAT['public_non_titulaire']) * cotisations_salariales_contributives
),
_P.cotsoc.sal.fonc.commun.plafond_base_solidarite,
),
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, cotisation
class fonds_emploi_hospitalier(Variable):
column = FloatCol
entity_class = Individus
label = u"Fonds pour l'emploi hospitalier (employeur)"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales_public = simulation.calculate('assiette_cotisations_sociales_public', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
cotisation = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "feh",
base = assiette_cotisations_sociales_public, # TODO: check base
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, cotisation
class ircantec_salarie(Variable):
column = FloatCol
entity_class = Individus
label = u"Ircantec salarié"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales = simulation.calculate('assiette_cotisations_sociales', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
ircantec = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_salarie,
bareme_name = "ircantec",
base = assiette_cotisations_sociales,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, ircantec
class ircantec_employeur(Variable):
column = FloatCol
entity_class = Individus
label = u"Ircantec employeur"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales = simulation.calculate('assiette_cotisations_sociales', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
ircantec = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "ircantec",
base = assiette_cotisations_sociales,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, ircantec
class pension_civile_salarie(Variable):
column = FloatCol
entity_class = Individus
label = u"Pension civile salarié"
    url = u"http://www.ac-besancon.fr/spip.php?article2662"
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period) # TODO: check nbi
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
sal = _P.cotsoc.cotisations_salarie
        terr_or_hosp = (
            (type_sal == CAT['public_titulaire_territoriale']) |
            (type_sal == CAT['public_titulaire_hospitaliere'])
        )
pension_civile_salarie = (
(type_sal == CAT['public_titulaire_etat']) *
sal['public_titulaire_etat']['pension'].calc(traitement_indiciaire_brut) +
terr_or_hosp * sal['public_titulaire_territoriale']['cnracl1'].calc(traitement_indiciaire_brut)
)
return period, -pension_civile_salarie
class pension_civile_employeur(Variable):
column = FloatCol
entity_class = Individus
label = u"Cotisation patronale pension civile"
url = u"http://www.ac-besancon.fr/spip.php?article2662"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales_public = simulation.calculate('assiette_cotisations_sociales_public', period)
# plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
pat = _P.cotsoc.cotisations_employeur
terr_or_hosp = (
(type_sal == CAT['public_titulaire_territoriale']) | (type_sal == CAT['public_titulaire_hospitaliere'])
)
cot_pat_pension_civile = (
(type_sal == CAT['public_titulaire_etat']) * pat['public_titulaire_etat']['pension'].calc(
assiette_cotisations_sociales_public) +
terr_or_hosp * pat['public_titulaire_territoriale']['cnracl'].calc(assiette_cotisations_sociales_public)
)
return period, -cot_pat_pension_civile
class rafp_salarie(DatedVariable):
column = FloatCol
entity_class = Individus
label = u"Part salariale de la retraite additionelle de la fonction publique"
    # Employee share of the additional civil service pension (RAFP)
    # TODO: add the GIPA, which is not affected by the base (assiette) ceiling
@dated_function(start = date(2005, 1, 1))
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period)
type_sal = simulation.calculate('type_sal', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
supp_familial_traitement = simulation.calculate('supp_familial_traitement', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
_P = simulation.legislation_at(period.start)
eligible = ((type_sal == CAT['public_titulaire_etat'])
+ (type_sal == CAT['public_titulaire_territoriale'])
+ (type_sal == CAT['public_titulaire_hospitaliere']))
plaf_ass = _P.cotsoc.sal.fonc.etat.rafp_plaf_assiette
base_imposable = primes_fonction_publique + supp_familial_traitement + indemnite_residence
assiette = min_(base_imposable, plaf_ass * traitement_indiciaire_brut * eligible)
        # Same scheme for the State and local government civil services
rafp_salarie = eligible * _P.cotsoc.cotisations_salarie.public_titulaire_etat['rafp'].calc(assiette)
return period, -rafp_salarie
class rafp_employeur(DatedVariable):
column = FloatCol
entity_class = Individus
label = u"Part patronale de la retraite additionnelle de la fonction publique"
    # TODO: add the GIPA, which is not affected by the base (assiette) ceiling
@dated_function(start = date(2005, 1, 1))
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period)
type_sal = simulation.calculate('type_sal', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
supp_familial_traitement = simulation.calculate('supp_familial_traitement', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
_P = simulation.legislation_at(period.start)
eligible = (
(type_sal == CAT['public_titulaire_etat']) +
(type_sal == CAT['public_titulaire_territoriale']) +
(type_sal == CAT['public_titulaire_hospitaliere'])
)
plaf_ass = _P.cotsoc.sal.fonc.etat.rafp_plaf_assiette
base_imposable = primes_fonction_publique + supp_familial_traitement + indemnite_residence
assiette = min_(base_imposable, plaf_ass * traitement_indiciaire_brut * eligible)
bareme_rafp = _P.cotsoc.cotisations_employeur.public_titulaire_etat['rafp']
rafp_employeur = eligible * bareme_rafp.calc(assiette)
return period, - rafp_employeur
def seuil_fds(law):
    '''
    Compute the monthly liability threshold for the solidarity fund contribution
    '''
ind_maj_ref = law.cotsoc.sal.fonc.commun.ind_maj_ref
pt_ind_mensuel = law.cotsoc.sal.fonc.commun.pt_ind / 12
seuil_mensuel = math.floor((pt_ind_mensuel * ind_maj_ref))
return seuil_mensuel
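# Editorial sketch, not part of the original module: the numbers below are
# placeholders rather than the real legislative parameters; the helper only
# restates the arithmetic of seuil_fds() on a tiny stand-in object so the
# formula can be read in isolation.
def _example_seuil_fds():
    class _FakeCommun(object):
        ind_maj_ref = 300      # hypothetical reference index (indice majoré)
        pt_ind = 5556.35       # hypothetical yearly value of the index point
    pt_ind_mensuel = _FakeCommun.pt_ind / 12
    return math.floor(pt_ind_mensuel * _FakeCommun.ind_maj_ref)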
|
agpl-3.0
| -7,209,662,411,645,354,000 | 2,500,375,164,333,328,000 | 44.043478 | 116 | 0.662236 | false |
kingvuplus/gui_test5
|
lib/python/Plugins/SystemPlugins/DiseqcTester/plugin.py
|
63
|
27159
|
import random
from Screens.Satconfig import NimSelection
from Screens.Screen import Screen
from Screens.TextBox import TextBox
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Components.ActionMap import ActionMap, NumberActionMap
from Components.NimManager import nimmanager
from Components.ResourceManager import resourcemanager
from Components.TuneTest import TuneTest
from Components.Sources.List import List
from Components.Sources.Progress import Progress
from Components.Sources.StaticText import StaticText
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, ConfigSelection, ConfigYesNo
from Components.Harddisk import harddiskmanager
# always use:
# setResultType(type)
# setResultParameter(parameter)
# getTextualResult()
class ResultParser:
def __init__(self):
pass
TYPE_BYORBPOS = 0
TYPE_BYINDEX = 1
TYPE_ALL = 2
def setResultType(self, type):
self.type = type
def setResultParameter(self, parameter):
if self.type == self.TYPE_BYORBPOS:
self.orbpos = parameter
elif self.type == self.TYPE_BYINDEX:
self.index = parameter
def getTextualResultForIndex(self, index, logfulltransponders = False):
text = ""
text += "%s:\n" % self.getTextualIndexRepresentation(index)
failed, successful = self.results[index]["failed"], self.results[index]["successful"]
countfailed = len(failed)
countsuccessful = len(successful)
countall = countfailed + countsuccessful
percentfailed = round(countfailed / float(countall + 0.0001) * 100)
percentsuccessful = round(countsuccessful / float(countall + 0.0001) * 100)
text += "Tested %d transponders\n%d (%d %%) transponders succeeded\n%d (%d %%) transponders failed\n" % (countall, countsuccessful, percentsuccessful, countfailed, percentfailed)
reasons = {}
completelist = []
if countfailed > 0:
for transponder in failed:
completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
reasons[transponder[2]] = reasons.get(transponder[2], [])
reasons[transponder[2]].append(transponder)
if transponder[2] == "pids_failed":
print transponder[2], "-", transponder[3]
text += "The %d unsuccessful tuning attempts failed for the following reasons:\n" % countfailed
for reason in reasons.keys():
text += "%s: %d transponders failed\n" % (reason, len(reasons[reason]))
for reason in reasons.keys():
text += "\n"
text += "%s previous planes:\n" % reason
for transponder in reasons[reason]:
if transponder[1] is not None:
text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
else:
text += "No transponder tuned"
text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
text += "\n"
if logfulltransponders:
text += str(transponder[1])
text += " ==> "
text += str(transponder[0])
text += "\n"
if reason == "pids_failed":
text += "(tsid, onid): "
text += str(transponder[3]['real'])
text += "(read from sat) != "
text += str(transponder[3]['expected'])
text += "(read from file)"
text += "\n"
text += "\n"
if countsuccessful > 0:
text += "\n"
text += "Successfully tuned transponders' previous planes:\n"
for transponder in successful:
completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
if transponder[1] is not None:
text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
else:
text += "No transponder tuned"
text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
text += "\n"
text += "------------------------------------------------\n"
text += "complete transponderlist:\n"
for entry in completelist:
text += str(entry["transponder"]) + " -- " + str(entry["fedata"]) + "\n"
return text
def getTextualResult(self):
text = ""
if self.type == self.TYPE_BYINDEX:
text += self.getTextualResultForIndex(self.index)
elif self.type == self.TYPE_BYORBPOS:
for index in self.results.keys():
if index[2] == self.orbpos:
text += self.getTextualResultForIndex(index)
text += "\n-----------------------------------------------------\n"
elif self.type == self.TYPE_ALL:
orderedResults = {}
for index in self.results.keys():
orbpos = index[2]
orderedResults[orbpos] = orderedResults.get(orbpos, [])
orderedResults[orbpos].append(index)
ordered_orbpos = orderedResults.keys()
ordered_orbpos.sort()
for orbpos in ordered_orbpos:
text += "\n*****************************************\n"
text += "Orbital position %s:" % str(orbpos)
text += "\n*****************************************\n"
for index in orderedResults[orbpos]:
text += self.getTextualResultForIndex(index, logfulltransponders = True)
text += "\n-----------------------------------------------------\n"
return text
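# Editorial sketch, not part of the original plugin: the comment above the
# ResultParser class describes the intended call order (setResultType, then
# setResultParameter, then getTextualResult).  The helper below only
# illustrates that order on an already populated parser such as DiseqcTester;
# nothing calls it, and the orbital position 192 is a hypothetical example.
def _exampleResultParserUsage(parser):
	parser.setResultType(ResultParser.TYPE_BYORBPOS)
	parser.setResultParameter(192)
	return parser.getTextualResult()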
class DiseqcTester(Screen, TuneTest, ResultParser):
skin = """
<screen position="90,100" size="520,400" title="DiSEqC Tester" >
<!--ePixmap pixmap="icons/dish_scan.png" position="5,25" zPosition="0" size="119,110" transparent="1" alphatest="on" />
<widget source="Frontend" render="Label" position="190,10" zPosition="2" size="260,20" font="Regular;19" halign="center" valign="center" transparent="1">
<convert type="FrontendInfo">SNRdB</convert>
</widget>
<eLabel name="snr" text="SNR:" position="120,35" size="60,22" font="Regular;21" halign="right" transparent="1" />
<widget source="Frontend" render="Progress" position="190,35" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc">
<convert type="FrontendInfo">SNR</convert>
</widget>
<widget source="Frontend" render="Label" position="460,35" size="60,22" font="Regular;21">
<convert type="FrontendInfo">SNR</convert>
</widget>
<eLabel name="agc" text="AGC:" position="120,60" size="60,22" font="Regular;21" halign="right" transparent="1" />
<widget source="Frontend" render="Progress" position="190,60" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc">
<convert type="FrontendInfo">AGC</convert>
</widget>
<widget source="Frontend" render="Label" position="460,60" size="60,22" font="Regular;21">
<convert type="FrontendInfo">AGC</convert>
</widget>
<eLabel name="ber" text="BER:" position="120,85" size="60,22" font="Regular;21" halign="right" transparent="1" />
<widget source="Frontend" render="Progress" position="190,85" size="260,20" pixmap="bar_ber.png" borderWidth="2" borderColor="#cccccc">
<convert type="FrontendInfo">BER</convert>
</widget>
<widget source="Frontend" render="Label" position="460,85" size="60,22" font="Regular;21">
<convert type="FrontendInfo">BER</convert>
</widget>
<eLabel name="lock" text="Lock:" position="120,115" size="60,22" font="Regular;21" halign="right" />
<widget source="Frontend" render="Pixmap" pixmap="icons/lock_on.png" position="190,110" zPosition="1" size="38,31" alphatest="on">
<convert type="FrontendInfo">LOCK</convert>
<convert type="ConditionalShowHide" />
</widget>
<widget source="Frontend" render="Pixmap" pixmap="icons/lock_off.png" position="190,110" zPosition="1" size="38,31" alphatest="on">
<convert type="FrontendInfo">LOCK</convert>
<convert type="ConditionalShowHide">Invert</convert>
</widget-->
<widget source="progress_list" render="Listbox" position="0,0" size="510,150" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (10, 0), size = (330, 25), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the index name,
MultiContentEntryText(pos = (330, 0), size = (150, 25), flags = RT_HALIGN_RIGHT, text = 2) # index 2 is the status,
],
"fonts": [gFont("Regular", 20)],
"itemHeight": 25
}
</convert>
</widget>
<eLabel name="overall_progress" text="Overall progress:" position="20,162" size="480,22" font="Regular;21" halign="center" transparent="1" />
<widget source="overall_progress" render="Progress" position="20,192" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
<eLabel name="overall_progress" text="Progress:" position="20,222" size="480,22" font="Regular;21" halign="center" transparent="1" />
<widget source="sub_progress" render="Progress" position="20,252" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
<eLabel name="" text="Failed:" position="20,282" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="failed_counter" render="Label" position="160,282" size="100,20" font="Regular;21" />
<eLabel name="" text="Succeeded:" position="20,312" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="succeeded_counter" render="Label" position="160,312" size="100,20" font="Regular;21" />
<eLabel name="" text="With errors:" position="20,342" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="witherrors_counter" render="Label" position="160,342" size="100,20" font="Regular;21" />
<eLabel name="" text="Not tested:" position="20,372" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="untestable_counter" render="Label" position="160,372" size="100,20" font="Regular;21" />
<widget source="CmdText" render="Label" position="300,282" size="180,200" font="Regular;21" />
</screen>"""
TEST_TYPE_QUICK = 0
TEST_TYPE_RANDOM = 1
TEST_TYPE_COMPLETE = 2
def __init__(self, session, feid, test_type = TEST_TYPE_QUICK, loopsfailed = 3, loopssuccessful = 1, log = False):
Screen.__init__(self, session)
self.feid = feid
self.test_type = test_type
self.loopsfailed = loopsfailed
self.loopssuccessful = loopssuccessful
self.log = log
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.select,
"cancel": self.keyCancel,
}, -2)
TuneTest.__init__(self, feid, stopOnSuccess = self.loopssuccessful, stopOnError = self.loopsfailed)
#self["Frontend"] = FrontendStatus(frontend_source = lambda : self.frontend, update_interval = 100)
self["overall_progress"] = Progress()
self["sub_progress"] = Progress()
self["failed_counter"] = StaticText("0")
self["succeeded_counter"] = StaticText("0")
self["witherrors_counter"] = StaticText("0")
self["untestable_counter"] = StaticText("0")
self.list = []
self["progress_list"] = List(self.list)
self["progress_list"].onSelectionChanged.append(self.selectionChanged)
self["CmdText"] = StaticText(_("Please wait while scanning is in progress..."))
self.indexlist = {}
self.readTransponderList()
self.running = False
self.results = {}
self.resultsstatus = {}
self.onLayoutFinish.append(self.go)
def getProgressListComponent(self, index, status):
return index, self.getTextualIndexRepresentation(index), status
def clearProgressList(self):
self.list = []
self["progress_list"].list = self.list
def addProgressListItem(self, index):
if index in self.indexlist:
for entry in self.list:
if entry[0] == index:
self.changeProgressListStatus(index, "working")
return
self.list.append(self.getProgressListComponent(index, _("working")))
self["progress_list"].list = self.list
self["progress_list"].setIndex(len(self.list) - 1)
def changeProgressListStatus(self, index, status):
self.newlist = []
count = 0
indexpos = 0
for entry in self.list:
if entry[0] == index:
self.newlist.append(self.getProgressListComponent(index, status))
indexpos = count
else:
self.newlist.append(entry)
count += 1
self.list = self.newlist
self["progress_list"].list = self.list
self["progress_list"].setIndex(indexpos)
def readTransponderList(self):
for sat in nimmanager.getSatListForNim(self.feid):
for transponder in nimmanager.getTransponders(sat[0]):
#print transponder
mytransponder = (transponder[1] / 1000, transponder[2] / 1000, transponder[3], transponder[4], transponder[7], sat[0], transponder[5], transponder[6], transponder[8], transponder[9], transponder[10], transponder[11])
self.analyseTransponder(mytransponder)
def getIndexForTransponder(self, transponder):
if transponder[0] < 11700:
band = 1 # low
else:
band = 0 # high
polarisation = transponder[2]
sat = transponder[5]
index = (band, polarisation, sat)
return index
	# sort the transponder into self.indexlist
def analyseTransponder(self, transponder):
index = self.getIndexForTransponder(transponder)
if index not in self.indexlist:
self.indexlist[index] = []
self.indexlist[index].append(transponder)
#print "self.indexlist:", self.indexlist
	# returns a human-readable string describing the given index to the user
def getTextualIndexRepresentation(self, index):
print "getTextualIndexRepresentation:", index
text = ""
text += nimmanager.getSatDescription(index[2]) + ", "
if index[0] == 1:
text += "Low Band, "
else:
text += "High Band, "
if index[1] == 0:
text += "H"
else:
text += "V"
return text
def fillTransponderList(self):
self.clearTransponder()
print "----------- fillTransponderList"
print "index:", self.currentlyTestedIndex
keys = self.indexlist.keys()
if self.getContinueScanning():
print "index:", self.getTextualIndexRepresentation(self.currentlyTestedIndex)
for transponder in self.indexlist[self.currentlyTestedIndex]:
self.addTransponder(transponder)
print "transponderList:", self.transponderlist
return True
else:
return False
def progressCallback(self, progress):
if progress[0] != self["sub_progress"].getRange():
self["sub_progress"].setRange(progress[0])
self["sub_progress"].setValue(progress[1])
# logic for scanning order of transponders
# on go getFirstIndex is called
def getFirstIndex(self):
# TODO use other function to scan more randomly
if self.test_type == self.TEST_TYPE_QUICK:
self.myindex = 0
keys = self.indexlist.keys()
keys.sort(key = lambda a: a[2]) # sort by orbpos
self["overall_progress"].setRange(len(keys))
self["overall_progress"].setValue(self.myindex)
return keys[0]
elif self.test_type == self.TEST_TYPE_RANDOM:
self.randomkeys = self.indexlist.keys()
random.shuffle(self.randomkeys)
self.myindex = 0
self["overall_progress"].setRange(len(self.randomkeys))
self["overall_progress"].setValue(self.myindex)
return self.randomkeys[0]
elif self.test_type == self.TEST_TYPE_COMPLETE:
keys = self.indexlist.keys()
print "keys:", keys
successorindex = {}
for index in keys:
successorindex[index] = []
for otherindex in keys:
if otherindex != index:
successorindex[index].append(otherindex)
random.shuffle(successorindex[index])
self.keylist = []
stop = False
currindex = None
while not stop:
if currindex is None or len(successorindex[currindex]) == 0:
oldindex = currindex
for index in successorindex.keys():
if len(successorindex[index]) > 0:
currindex = index
self.keylist.append(currindex)
break
if currindex == oldindex:
stop = True
else:
currindex = successorindex[currindex].pop()
self.keylist.append(currindex)
print "self.keylist:", self.keylist
self.myindex = 0
self["overall_progress"].setRange(len(self.keylist))
self["overall_progress"].setValue(self.myindex)
return self.keylist[0]
# after each index is finished, getNextIndex is called to get the next index to scan
def getNextIndex(self):
# TODO use other function to scan more randomly
if self.test_type == self.TEST_TYPE_QUICK:
self.myindex += 1
keys = self.indexlist.keys()
keys.sort(key = lambda a: a[2]) # sort by orbpos
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
elif self.test_type == self.TEST_TYPE_RANDOM:
self.myindex += 1
keys = self.randomkeys
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
elif self.test_type == self.TEST_TYPE_COMPLETE:
self.myindex += 1
keys = self.keylist
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
# after each index is finished and the next index is returned by getNextIndex
	# the algorithm checks whether we should continue scanning
def getContinueScanning(self):
if self.test_type == self.TEST_TYPE_QUICK or self.test_type == self.TEST_TYPE_RANDOM:
return self.myindex < len(self.indexlist.keys())
elif self.test_type == self.TEST_TYPE_COMPLETE:
return self.myindex < len(self.keylist)
def addResult(self, index, status, failedTune, successfullyTune):
self.results[index] = self.results.get(index, {"failed": [], "successful": [], "status": None, "internalstatus": None})
self.resultsstatus[status] = self.resultsstatus.get(status, [])
oldstatus = self.results[index]["internalstatus"]
if oldstatus is None:
self.results[index]["status"] = status
elif oldstatus == "successful":
if status == "failed":
self.results[index]["status"] = "with_errors"
elif status == "successful":
self.results[index]["status"] = oldstatus
elif status == "with_errors":
self.results[index]["status"] = "with_errors"
elif status == "not_tested":
self.results[index]["status"] = oldstatus
elif oldstatus == "failed":
if status == "failed":
self.results[index]["status"] = oldstatus
elif status == "successful":
self.results[index]["status"] = "with_errors"
elif status == "with_errors":
self.results[index]["status"] = "with_errors"
elif status == "not_tested":
self.results[index]["status"] = oldstatus
elif oldstatus == "with_errors":
if status == "failed":
self.results[index]["status"] = oldstatus
elif status == "successful":
self.results[index]["status"] = oldstatus
elif status == "with_errors":
self.results[index]["status"] = oldstatus
elif status == "not_tested":
self.results[index]["status"] = oldstatus
elif oldstatus == "not_tested":
self.results[index]["status"] = status
if self.results[index]["status"] != "working":
self.results[index]["internalstatus"] = self.results[index]["status"]
self.results[index]["failed"] = failedTune + self.results[index]["failed"]
self.results[index]["successful"] = successfullyTune + self.results[index]["successful"]
self.resultsstatus[status].append(index)
def finishedChecking(self):
print "finishedChecking"
TuneTest.finishedChecking(self)
if not self.results.has_key(self.currentlyTestedIndex):
self.results[self.currentlyTestedIndex] = {"failed": [], "successful": [], "status": None, "internalstatus": None}
if len(self.failedTune) > 0 and len(self.successfullyTune) > 0:
self.changeProgressListStatus(self.currentlyTestedIndex, "with errors")
self["witherrors_counter"].setText(str(int(self["witherrors_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "with_errors", self.failedTune, self.successfullyTune)
elif len(self.failedTune) == 0 and len(self.successfullyTune) == 0:
self.changeProgressListStatus(self.currentlyTestedIndex, "not tested")
self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "untestable", self.failedTune, self.successfullyTune)
elif len(self.failedTune) > 0:
self.changeProgressListStatus(self.currentlyTestedIndex, "failed")
#self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune)))
self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "failed", self.failedTune, self.successfullyTune)
else:
self.changeProgressListStatus(self.currentlyTestedIndex, "successful")
#self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune)))
self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "successful", self.failedTune, self.successfullyTune)
#self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune)))
#self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune)))
#if len(self.failedTune) == 0 and len(self.successfullyTune) == 0:
#self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1))
self.currentlyTestedIndex = self.getNextIndex()
self.addProgressListItem(self.currentlyTestedIndex)
if self.fillTransponderList():
self.run()
else:
self.running = False
self["progress_list"].setIndex(0)
print "results:", self.results
print "resultsstatus:", self.resultsstatus
if self.log:
file = open("/media/hdd/diseqctester.log", "w")
self.setResultType(ResultParser.TYPE_ALL)
file.write(self.getTextualResult())
file.close()
self.session.open(MessageBox, text=_("The results have been written to %s.") % "/media/hdd/diseqctester.log", type = MessageBox.TYPE_INFO)
def go(self):
self.running = True
self["failed_counter"].setText("0")
self["succeeded_counter"].setText("0")
self["untestable_counter"].setText("0")
self.currentlyTestedIndex = self.getFirstIndex()
self.clearProgressList()
self.addProgressListItem(self.currentlyTestedIndex)
if self.fillTransponderList():
self.run()
def keyCancel(self):
self.close()
def select(self):
print "selectedIndex:", self["progress_list"].getCurrent()[0]
if not self.running:
index = self["progress_list"].getCurrent()[0]
#self.setResultType(ResultParser.TYPE_BYORBPOS)
#self.setResultParameter(index[2])
self.setResultType(ResultParser.TYPE_BYINDEX)
self.setResultParameter(index)
#self.setResultType(ResultParser.TYPE_ALL)
self.session.open(TextBox, self.getTextualResult())
def selectionChanged(self):
print "selection changed"
if len(self.list) > 0 and not self.running:
self["CmdText"].setText(_("Press OK to get further details for %s") % str(self["progress_list"].getCurrent()[1]))
class DiseqcTesterTestTypeSelection(Screen, ConfigListScreen):
def __init__(self, session, feid):
Screen.__init__(self, session)
        # for the skin: first try DiseqcTesterTestTypeSelection, then Setup; this allows individual skinning
self.skinName = ["DiseqcTesterTestTypeSelection", "Setup" ]
self.setup_title = _("DiSEqC-tester settings")
self.onChangedEntry = [ ]
self.feid = feid
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.keyOK,
"ok": self.keyOK,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def createSetup(self):
self.testtype = ConfigSelection(choices={"quick": _("Quick"), "random": _("Random"), "complete": _("Complete")}, default = "quick")
self.testtypeEntry = getConfigListEntry(_("Test type"), self.testtype)
self.list.append(self.testtypeEntry)
self.loopsfailed = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "3")
self.loopsfailedEntry = getConfigListEntry(_("Stop testing plane after # failed transponders"), self.loopsfailed)
self.list.append(self.loopsfailedEntry)
self.loopssuccessful = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "1")
self.loopssuccessfulEntry = getConfigListEntry(_("Stop testing plane after # successful transponders"), self.loopssuccessful)
self.list.append(self.loopssuccessfulEntry)
self.log = ConfigYesNo(False)
if harddiskmanager.HDDCount() > 0:
self.logEntry = getConfigListEntry(_("Log results to harddisk"), self.log)
self.list.append(self.logEntry)
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyOK(self):
print self.testtype.value
testtype = DiseqcTester.TEST_TYPE_QUICK
if self.testtype.value == "quick":
testtype = DiseqcTester.TEST_TYPE_QUICK
elif self.testtype.value == "random":
testtype = DiseqcTester.TEST_TYPE_RANDOM
elif self.testtype.value == "complete":
testtype = DiseqcTester.TEST_TYPE_COMPLETE
self.session.open(DiseqcTester, feid = self.feid, test_type = testtype, loopsfailed = int(self.loopsfailed.value), loopssuccessful = int(self.loopssuccessful.value), log = self.log.value)
def keyCancel(self):
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class DiseqcTesterNimSelection(NimSelection):
skin = """
<screen position="160,123" size="400,330" title="Select a tuner">
<widget source="nimlist" render="Listbox" position="0,0" size="380,300" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (10, 5), size = (360, 30), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the nim name,
MultiContentEntryText(pos = (50, 30), size = (320, 30), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is a description of the nim settings,
],
"fonts": [gFont("Regular", 20), gFont("Regular", 15)],
"itemHeight": 70
}
</convert>
</widget>
</screen>"""
def __init__(self, session, args = None):
NimSelection.__init__(self, session)
def setResultClass(self):
#self.resultclass = DiseqcTester
self.resultclass = DiseqcTesterTestTypeSelection
def showNim(self, nim):
nimConfig = nimmanager.getNimConfig(nim.slot)
if nim.isCompatible("DVB-S"):
if nimConfig.configMode.value in ("loopthrough", "equal", "satposdepends", "nothing"):
return False
if nimConfig.configMode.value == "simple":
if nimConfig.diseqcMode.value == "positioner":
return True
return True
return False
def DiseqcTesterMain(session, **kwargs):
session.open(DiseqcTesterNimSelection)
def autostart(reason, **kwargs):
resourcemanager.addResource("DiseqcTester", DiseqcTesterMain)
def Plugins(**kwargs):
return [ PluginDescriptor(name="DiSEqC Tester", description=_("Test DiSEqC settings"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=DiseqcTesterMain),
PluginDescriptor(where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart)]
|
gpl-2.0
| 7,836,003,082,799,921,000 | 6,862,996,897,298,435,000 | 38.822581 | 220 | 0.692073 | false |
Belxjander/Kirito
|
Python-3.5.0-main/Lib/multiprocessing/queues.py
|
9
|
11166
|
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import weakref
import errno
from queue import Empty, Full
import _multiprocessing
from . import connection
from . import context
from .util import debug, info, Finalize, register_after_fork, is_exiting
from .reduction import ForkingPickler
#
# Queue type using a pipe, buffer and thread
#
class Queue(object):
def __init__(self, maxsize=0, *, ctx):
if maxsize <= 0:
# Can raise ImportError (see issues #3770 and #23400)
from .synchronize import SEM_VALUE_MAX as maxsize
self._maxsize = maxsize
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
self._sem = ctx.BoundedSemaphore(maxsize)
# For use by concurrent.futures
self._ignore_epipe = False
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
context.assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send_bytes = self._writer.send_bytes
self._recv_bytes = self._reader.recv_bytes
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
with self._notempty:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
def get(self, block=True, timeout=None):
if block and timeout is None:
with self._rlock:
res = self._recv_bytes()
self._sem.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if block:
timeout = deadline - time.time()
if timeout < 0 or not self._poll(timeout):
raise Empty
elif not self._poll():
raise Empty
res = self._recv_bytes()
self._sem.release()
finally:
self._rlock.release()
# unserialize the data after having released the lock
return ForkingPickler.loads(res)
def qsize(self):
# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
try:
self._reader.close()
finally:
close = self._close
if close:
self._close = None
close()
def join_thread(self):
debug('Queue.join_thread()')
assert self._closed
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send_bytes,
self._wlock, self._writer.close, self._ignore_epipe),
name='QueueFeederThread'
)
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = (self._opid == os.getpid())
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
with notempty:
buffer.append(_sentinel)
notempty.notify()
@staticmethod
def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
debug('starting thread to feed data to pipe')
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
# serialize the data before acquiring the lock
obj = ForkingPickler.dumps(obj)
if wacquire is None:
send_bytes(obj)
else:
wacquire()
try:
send_bytes(obj)
finally:
wrelease()
except IndexError:
pass
except Exception as e:
if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
return
# Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to cleanup.
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
_sentinel = object()
#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#
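# Illustrative sketch (an assumption, not in the original source) of the
# task_done()/join() pattern described above:
#
#     import multiprocessing as mp
#     jq = mp.JoinableQueue()
#     jq.put('task')
#     # in a worker process: item = jq.get(); ...; jq.task_done()
#     jq.join()   # returns once task_done() was called for every put()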
class JoinableQueue(Queue):
def __init__(self, maxsize=0, *, ctx):
Queue.__init__(self, maxsize, ctx=ctx)
self._unfinished_tasks = ctx.Semaphore(0)
self._cond = ctx.Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
with self._notempty, self._cond:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
def task_done(self):
with self._cond:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
def join(self):
with self._cond:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
#
# Simplified Queue type -- really just a locked pipe
#
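# Illustrative sketch (assumption): unlike Queue above, this type has no maxsize,
# feeder thread or close(); it only offers put(), get() and empty(), e.g.
#
#     import multiprocessing as mp
#     sq = mp.SimpleQueue()
#     sq.put({'answer': 42})
#     print(sq.get())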
class SimpleQueue(object):
def __init__(self, *, ctx):
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._poll = self._reader.poll
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
def empty(self):
return not self._poll()
def __getstate__(self):
context.assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
def get(self):
with self._rlock:
res = self._reader.recv_bytes()
# unserialize the data after having released the lock
return ForkingPickler.loads(res)
def put(self, obj):
# serialize the data before acquiring the lock
obj = ForkingPickler.dumps(obj)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj)
|
gpl-3.0
| -7,474,952,118,292,354,000 | 5,741,265,834,941,058,000 | 30.453521 | 80 | 0.541555 | false |
justacec/bokeh
|
bokeh/server/tornado.py
|
2
|
15041
|
''' Provides the Bokeh Server Tornado application.
'''
from __future__ import absolute_import, print_function
import logging
log = logging.getLogger(__name__)
import atexit
# NOTE: needs PyPI backport on Python 2 (https://pypi.python.org/pypi/futures)
from concurrent.futures import ProcessPoolExecutor
import os
from pprint import pformat
import signal
from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.web import Application as TornadoApplication
from tornado.web import HTTPError
from tornado.web import StaticFileHandler
from bokeh.resources import Resources
from bokeh.settings import settings
from .urls import per_app_patterns, toplevel_patterns
from .connection import ServerConnection
from .application_context import ApplicationContext
from .views.static_handler import StaticHandler
def match_host(host, pattern):
""" Match host against pattern
>>> match_host('192.168.0.1:80', '192.168.0.1:80')
True
>>> match_host('192.168.0.1:80', '192.168.0.1')
True
>>> match_host('192.168.0.1:80', '192.168.0.1:8080')
False
>>> match_host('192.168.0.1', '192.168.0.2')
False
>>> match_host('192.168.0.1', '192.168.*.*')
True
>>> match_host('alice', 'alice')
True
>>> match_host('alice:80', 'alice')
True
>>> match_host('alice', 'bob')
False
>>> match_host('foo.example.com', 'foo.example.com.net')
False
>>> match_host('alice', '*')
True
>>> match_host('alice', '*:*')
True
>>> match_host('alice:80', '*')
True
>>> match_host('alice:80', '*:80')
True
>>> match_host('alice:8080', '*:80')
False
"""
if ':' in host:
host, host_port = host.rsplit(':', 1)
else:
host_port = None
if ':' in pattern:
pattern, pattern_port = pattern.rsplit(':', 1)
if pattern_port == '*':
pattern_port = None
else:
pattern_port = None
if pattern_port is not None and host_port != pattern_port:
return False
host = host.split('.')
pattern = pattern.split('.')
if len(pattern) > len(host):
return False
for h, p in zip(host, pattern):
if h == p or p == '*':
continue
else:
return False
return True
# factored out to be easier to test
def check_whitelist(request_host, whitelist):
''' Check a given request host against a whitelist.
'''
if ':' not in request_host:
request_host = request_host + ':80'
if request_host in whitelist:
return True
return any(match_host(request_host, host) for host in whitelist)
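# Illustrative calls (assumption, not from the original source); ports default
# to 80 when the request host omits one:
#
#     check_whitelist('localhost', ['localhost:80'])        # True
#     check_whitelist('example.com:5006', ['*:5006'])       # True
#     check_whitelist('evil.com', ['example.com'])          # False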
def _whitelist(handler_class):
if hasattr(handler_class.prepare, 'patched'):
return
old_prepare = handler_class.prepare
def _prepare(self, *args, **kw):
if not check_whitelist(self.request.host, self.application._hosts):
log.info("Rejected connection from host '%s' because it is not in the --host whitelist" % self.request.host)
raise HTTPError(403)
return old_prepare(self, *args, **kw)
_prepare.patched = True
handler_class.prepare = _prepare
class BokehTornado(TornadoApplication):
''' A Tornado Application used to implement the Bokeh Server.
The Server class is the main public interface, this class has
Tornado implementation details.
Args:
applications (dict of str : bokeh.application.Application) : map from paths to Application instances
The application is used to create documents for each session.
extra_patterns (seq[tuple]) : tuples of (str, http or websocket handler)
Use this argument to add additional endpoints to custom deployments
of the Bokeh Server.
prefix (str) : a URL prefix to use for all Bokeh server paths
hosts (list) : hosts that are valid values for the Host header
secret_key (str) : secret key for signing session IDs
sign_sessions (boolean) : whether to sign session IDs
generate_session_ids (boolean) : whether to generate a session ID when none is provided
extra_websocket_origins (list) : hosts that can connect to the websocket
These are in addition to ``hosts``.
keep_alive_milliseconds (int) : number of milliseconds between keep-alive pings
Set to 0 to disable pings. Pings keep the websocket open.
check_unused_sessions_milliseconds (int) : number of milliseconds between check for unused sessions
unused_session_lifetime_milliseconds (int) : number of milliseconds for unused session lifetime
stats_log_frequency_milliseconds (int) : number of milliseconds between logging stats
develop (boolean) : True for develop mode
'''
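    # Illustrative sketch (assumption, not part of the original source) of a
    # minimal instantiation; `Application` here is the standard bokeh handler
    # container:
    #
    #     from bokeh.application import Application
    #     tornado_app = BokehTornado({'/': Application()}, prefix='',
    #                                hosts=['localhost:5006'],
    #                                extra_websocket_origins=[])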
def __init__(self, applications, prefix, hosts,
extra_websocket_origins,
io_loop=None,
extra_patterns=None,
secret_key=settings.secret_key_bytes(),
sign_sessions=settings.sign_sessions(),
generate_session_ids=True,
# heroku, nginx default to 60s timeout, so well less than that
keep_alive_milliseconds=37000,
# how often to check for unused sessions
check_unused_sessions_milliseconds=17000,
# how long unused sessions last
unused_session_lifetime_milliseconds=15000,
# how often to log stats
stats_log_frequency_milliseconds=15000,
develop=False):
self._prefix = prefix
if io_loop is None:
io_loop = IOLoop.current()
self._loop = io_loop
if keep_alive_milliseconds < 0:
# 0 means "disable"
raise ValueError("keep_alive_milliseconds must be >= 0")
if check_unused_sessions_milliseconds <= 0:
raise ValueError("check_unused_sessions_milliseconds must be > 0")
if unused_session_lifetime_milliseconds <= 0:
raise ValueError("check_unused_sessions_milliseconds must be > 0")
if stats_log_frequency_milliseconds <= 0:
raise ValueError("stats_log_frequency_milliseconds must be > 0")
self._hosts = set(hosts)
self._websocket_origins = self._hosts | set(extra_websocket_origins)
self._resources = {}
self._develop = develop
self._secret_key = secret_key
self._sign_sessions = sign_sessions
self._generate_session_ids = generate_session_ids
log.debug("Allowed Host headers: %r", list(self._hosts))
log.debug("These host origins can connect to the websocket: %r", list(self._websocket_origins))
# Wrap applications in ApplicationContext
self._applications = dict()
for k,v in applications.items():
self._applications[k] = ApplicationContext(v, self._develop, self._loop)
extra_patterns = extra_patterns or []
all_patterns = []
for key, app in applications.items():
app_patterns = []
for p in per_app_patterns:
if key == "/":
route = p[0]
else:
route = key + p[0]
route = self._prefix + route
app_patterns.append((route, p[1], { "application_context" : self._applications[key] }))
websocket_path = None
for r in app_patterns:
if r[0].endswith("/ws"):
websocket_path = r[0]
if not websocket_path:
raise RuntimeError("Couldn't find websocket path")
for r in app_patterns:
r[2]["bokeh_websocket_path"] = websocket_path
all_patterns.extend(app_patterns)
# add a per-app static path if requested by the application
if app.static_path is not None:
if key == "/":
route = "/static/(.*)"
else:
route = key + "/static/(.*)"
route = self._prefix + route
all_patterns.append((route, StaticFileHandler, { "path" : app.static_path }))
for p in extra_patterns + toplevel_patterns:
prefixed_pat = (self._prefix+p[0],) + p[1:]
all_patterns.append(prefixed_pat)
for pat in all_patterns:
_whitelist(pat[1])
log.debug("Patterns are:")
for line in pformat(all_patterns, width=60).split("\n"):
log.debug(" " + line)
super(BokehTornado, self).__init__(all_patterns)
self._clients = set()
self._executor = ProcessPoolExecutor(max_workers=4)
self._loop.add_callback(self._start_async)
self._stats_job = PeriodicCallback(self.log_stats,
stats_log_frequency_milliseconds,
io_loop=self._loop)
self._unused_session_linger_milliseconds = unused_session_lifetime_milliseconds
self._cleanup_job = PeriodicCallback(self.cleanup_sessions,
check_unused_sessions_milliseconds,
io_loop=self._loop)
if keep_alive_milliseconds > 0:
self._ping_job = PeriodicCallback(self.keep_alive, keep_alive_milliseconds, io_loop=self._loop)
else:
self._ping_job = None
@property
def io_loop(self):
return self._loop
@property
def websocket_origins(self):
return self._websocket_origins
@property
def secret_key(self):
return self._secret_key
@property
def sign_sessions(self):
return self._sign_sessions
@property
def generate_session_ids(self):
return self._generate_session_ids
def root_url_for_request(self, request):
return request.protocol + "://" + request.host + self._prefix + "/"
def websocket_url_for_request(self, request, websocket_path):
# websocket_path comes from the handler, and already has any
# prefix included, no need to add here
protocol = "ws"
if request.protocol == "https":
protocol = "wss"
return protocol + "://" + request.host + websocket_path
def resources(self, request):
root_url = self.root_url_for_request(request)
if root_url not in self._resources:
self._resources[root_url] = Resources(mode="server",
root_url=root_url,
path_versioner=StaticHandler.append_version)
return self._resources[root_url]
def start(self, start_loop=True):
''' Start the Bokeh Server application main loop.
Args:
start_loop (boolean): False to not actually start event loop, used in tests
Returns:
None
Notes:
Keyboard interrupts or sigterm will cause the server to shut down.
'''
self._stats_job.start()
self._cleanup_job.start()
if self._ping_job is not None:
self._ping_job.start()
for context in self._applications.values():
context.run_load_hook()
if start_loop:
try:
self._loop.start()
except KeyboardInterrupt:
print("\nInterrupted, shutting down")
def stop(self):
''' Stop the Bokeh Server application.
Returns:
None
'''
# TODO we should probably close all connections and shut
# down all sessions either here or in unlisten() ... but
# it isn't that important since in real life it's rare to
# do a clean shutdown (vs. a kill-by-signal) anyhow.
for context in self._applications.values():
context.run_unload_hook()
self._stats_job.stop()
self._cleanup_job.stop()
if self._ping_job is not None:
self._ping_job.stop()
self._loop.stop()
@property
def executor(self):
return self._executor
def new_connection(self, protocol, socket, application_context, session):
connection = ServerConnection(protocol, socket, application_context, session)
self._clients.add(connection)
return connection
def client_lost(self, connection):
self._clients.discard(connection)
connection.detach_session()
def get_session(self, app_path, session_id):
if app_path not in self._applications:
raise ValueError("Application %s does not exist on this server" % app_path)
return self._applications[app_path].get_session(session_id)
def get_sessions(self, app_path):
if app_path not in self._applications:
raise ValueError("Application %s does not exist on this server" % app_path)
return list(self._applications[app_path].sessions)
@gen.coroutine
def cleanup_sessions(self):
for app in self._applications.values():
yield app.cleanup_sessions(self._unused_session_linger_milliseconds)
raise gen.Return(None)
def log_stats(self):
if log.getEffectiveLevel() > logging.DEBUG:
# avoid the work below if we aren't going to log anything
return
log.debug("[pid %d] %d clients connected", os.getpid(), len(self._clients))
for app_path, app in self._applications.items():
sessions = list(app.sessions)
unused_count = 0
for s in sessions:
if s.connection_count == 0:
unused_count += 1
log.debug("[pid %d] %s has %d sessions with %d unused",
os.getpid(), app_path, len(sessions), unused_count)
def keep_alive(self):
for c in self._clients:
c.send_ping()
@gen.coroutine
def run_in_background(self, _func, *args, **kwargs):
"""
Run a synchronous function in the background without disrupting
the main thread. Useful for long-running jobs.
"""
res = yield self._executor.submit(_func, *args, **kwargs)
raise gen.Return(res)
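    # Illustrative sketch (assumption): from another coroutine running on the
    # same IOLoop, CPU-bound work could be offloaded with
    #
    #     result = yield tornado_app.run_in_background(expensive_function, arg)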
@gen.coroutine
def _start_async(self):
try:
atexit.register(self._atexit)
signal.signal(signal.SIGTERM, self._sigterm)
except Exception:
self.exit(1)
_atexit_ran = False
def _atexit(self):
if self._atexit_ran:
return
self._atexit_ran = True
self._stats_job.stop()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
loop.run_sync(self._cleanup)
def _sigterm(self, signum, frame):
print("Received SIGTERM, shutting down")
self.stop()
self._atexit()
@gen.coroutine
def _cleanup(self):
log.debug("Shutdown: cleaning up")
self._executor.shutdown(wait=False)
self._clients.clear()
|
bsd-3-clause
| -2,789,930,925,835,200,500 | 660,455,092,316,545,000 | 33.656682 | 120 | 0.592248 | false |
minhphung171093/GreenERP_V7
|
openerp/addons/report_webkit/webkit_report.py
|
16
|
15315
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
# Contributor(s) : Florent Xicluna (Wingo SA)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import subprocess
import os
import sys
from openerp import report
import tempfile
import time
import logging
from functools import partial
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions
from openerp import netsvc
from openerp import pooler
from report_helper import WebKitHelper
from openerp.report.report_sxw import *
from openerp import addons
from openerp import tools
from openerp.tools.translate import _
from openerp.osv.osv import except_osv
_logger = logging.getLogger(__name__)
def mako_template(text):
"""Build a Mako template.
This template uses UTF-8 encoding
"""
tmp_lookup = TemplateLookup() #we need it in order to allow inclusion and inheritance
return Template(text, input_encoding='utf-8', output_encoding='utf-8', lookup=tmp_lookup)
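# Illustrative sketch (assumption, not part of the original module): the returned
# template renders to UTF-8 encoded bytes, e.g.
#
#     tpl = mako_template(u"<p>${_('Total')}: ${amount}</p>")
#     html = tpl.render(_=lambda s: s, amount=42)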
class WebKitParser(report_sxw):
"""Custom class that use webkit to render HTML reports
Code partially taken from report openoffice. Thanks guys :)
"""
def __init__(self, name, table, rml=False, parser=False,
header=True, store=False):
self.localcontext = {}
report_sxw.__init__(self, name, table, rml, parser,
header, store)
def get_lib(self, cursor, uid):
"""Return the lib wkhtml path"""
proxy = self.pool.get('ir.config_parameter')
webkit_path = proxy.get_param(cursor, uid, 'webkit_path')
if not webkit_path:
try:
defpath = os.environ.get('PATH', os.defpath).split(os.pathsep)
if hasattr(sys, 'frozen'):
defpath.append(os.getcwd())
if tools.config['root_path']:
defpath.append(os.path.dirname(tools.config['root_path']))
webkit_path = tools.which('wkhtmltopdf', path=os.pathsep.join(defpath))
except IOError:
webkit_path = None
if webkit_path:
return webkit_path
raise except_osv(
_('Wkhtmltopdf library path is not set'),
_('Please install executable on your system' \
' (sudo apt-get install wkhtmltopdf) or download it from here:' \
' http://code.google.com/p/wkhtmltopdf/downloads/list and set the' \
' path in the ir.config_parameter with the webkit_path key.' \
              ' Minimal version is 0.9.9')
)
def generate_pdf(self, comm_path, report_xml, header, footer, html_list, webkit_header=False):
"""Call webkit in order to generate pdf"""
if not webkit_header:
webkit_header = report_xml.webkit_header
fd, out_filename = tempfile.mkstemp(suffix=".pdf",
prefix="webkit.tmp.")
file_to_del = [out_filename]
if comm_path:
command = [comm_path]
else:
command = ['wkhtmltopdf']
command.append('--quiet')
# default to UTF-8 encoding. Use <meta charset="latin-1"> to override.
command.extend(['--encoding', 'utf-8'])
if header :
with tempfile.NamedTemporaryFile(suffix=".head.html",
delete=False) as head_file:
head_file.write(self._sanitize_html(header))
file_to_del.append(head_file.name)
command.extend(['--header-html', head_file.name])
if footer :
with tempfile.NamedTemporaryFile(suffix=".foot.html",
delete=False) as foot_file:
foot_file.write(self._sanitize_html(footer))
file_to_del.append(foot_file.name)
command.extend(['--footer-html', foot_file.name])
if webkit_header.margin_top :
command.extend(['--margin-top', str(webkit_header.margin_top).replace(',', '.')])
if webkit_header.margin_bottom :
command.extend(['--margin-bottom', str(webkit_header.margin_bottom).replace(',', '.')])
if webkit_header.margin_left :
command.extend(['--margin-left', str(webkit_header.margin_left).replace(',', '.')])
if webkit_header.margin_right :
command.extend(['--margin-right', str(webkit_header.margin_right).replace(',', '.')])
if webkit_header.orientation :
command.extend(['--orientation', str(webkit_header.orientation).replace(',', '.')])
if webkit_header.format :
command.extend(['--page-size', str(webkit_header.format).replace(',', '.')])
count = 0
for html in html_list :
with tempfile.NamedTemporaryFile(suffix="%d.body.html" %count,
delete=False) as html_file:
count += 1
html_file.write(self._sanitize_html(html))
file_to_del.append(html_file.name)
command.append(html_file.name)
command.append(out_filename)
stderr_fd, stderr_path = tempfile.mkstemp(text=True)
file_to_del.append(stderr_path)
try:
status = subprocess.call(command, stderr=stderr_fd)
os.close(stderr_fd) # ensure flush before reading
stderr_fd = None # avoid closing again in finally block
fobj = open(stderr_path, 'r')
error_message = fobj.read()
fobj.close()
if not error_message:
error_message = _('No diagnosis message was provided')
else:
error_message = _('The following diagnosis message was provided:\n') + error_message
if status :
raise except_osv(_('Webkit error' ),
_("The command 'wkhtmltopdf' failed with error code = %s. Message: %s") % (status, error_message))
with open(out_filename, 'rb') as pdf_file:
pdf = pdf_file.read()
os.close(fd)
finally:
if stderr_fd is not None:
os.close(stderr_fd)
for f_to_del in file_to_del:
try:
os.unlink(f_to_del)
except (OSError, IOError), exc:
_logger.error('cannot remove file %s: %s', f_to_del, exc)
return pdf
def translate_call(self, parser_instance, src):
"""Translate String."""
ir_translation = self.pool['ir.translation']
name = self.tmpl and 'addons/' + self.tmpl or None
res = ir_translation._get_source(parser_instance.cr, parser_instance.uid,
name, 'report', parser_instance.localcontext.get('lang', 'en_US'), src)
if res == src:
# no translation defined, fallback on None (backward compatibility)
res = ir_translation._get_source(parser_instance.cr, parser_instance.uid,
None, 'report', parser_instance.localcontext.get('lang', 'en_US'), src)
if not res :
return src
return res
# override needed to keep the attachments storing procedure
def create_single_pdf(self, cursor, uid, ids, data, report_xml, context=None):
"""generate the PDF"""
if context is None:
context={}
htmls = []
if report_xml.report_type != 'webkit':
return super(WebKitParser,self).create_single_pdf(cursor, uid, ids, data, report_xml, context=context)
parser_instance = self.parser(cursor,
uid,
self.name2,
context=context)
self.pool = pooler.get_pool(cursor.dbname)
objs = self.getObjects(cursor, uid, ids, context)
parser_instance.set_context(objs, data, ids, report_xml.report_type)
template = False
if report_xml.report_file :
# backward-compatible if path in Windows format
report_path = report_xml.report_file.replace("\\", "/")
path = addons.get_module_resource(*report_path.split('/'))
if path and os.path.exists(path) :
template = file(path).read()
if not template and report_xml.report_webkit_data :
template = report_xml.report_webkit_data
if not template :
raise except_osv(_('Error!'), _('Webkit report template not found!'))
header = report_xml.webkit_header.html
footer = report_xml.webkit_header.footer_html
if not header and report_xml.header:
raise except_osv(
_('No header defined for this Webkit report!'),
_('Please set a header in company settings.')
)
if not report_xml.header :
header = ''
default_head = addons.get_module_resource('report_webkit', 'default_header.html')
with open(default_head,'r') as f:
header = f.read()
css = report_xml.webkit_header.css
if not css :
css = ''
translate_call = partial(self.translate_call, parser_instance)
#default_filters=['unicode', 'entity'] can be used to set global filter
body_mako_tpl = mako_template(template)
helper = WebKitHelper(cursor, uid, report_xml.id, context)
if report_xml.precise_mode:
for obj in objs:
parser_instance.localcontext['objects'] = [obj]
try :
html = body_mako_tpl.render(helper=helper,
css=css,
_=translate_call,
**parser_instance.localcontext)
htmls.append(html)
except Exception:
msg = exceptions.text_error_template().render()
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
else:
try :
html = body_mako_tpl.render(helper=helper,
css=css,
_=translate_call,
**parser_instance.localcontext)
htmls.append(html)
except Exception:
msg = exceptions.text_error_template().render()
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
head_mako_tpl = mako_template(header)
try :
head = head_mako_tpl.render(helper=helper,
css=css,
_=translate_call,
_debug=False,
**parser_instance.localcontext)
except Exception:
raise except_osv(_('Webkit render!'),
exceptions.text_error_template().render())
foot = False
if footer :
foot_mako_tpl = mako_template(footer)
try :
foot = foot_mako_tpl.render(helper=helper,
css=css,
_=translate_call,
**parser_instance.localcontext)
except:
msg = exceptions.text_error_template().render()
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
if report_xml.webkit_debug :
try :
deb = head_mako_tpl.render(helper=helper,
css=css,
_debug=tools.ustr("\n".join(htmls)),
_=translate_call,
**parser_instance.localcontext)
except Exception:
msg = exceptions.text_error_template().render()
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
return (deb, 'html')
bin = self.get_lib(cursor, uid)
pdf = self.generate_pdf(bin, report_xml, head, foot, htmls)
return (pdf, 'pdf')
def create(self, cursor, uid, ids, data, context=None):
"""We override the create function in order to handle generator
Code taken from report openoffice. Thanks guys :) """
pool = pooler.get_pool(cursor.dbname)
ir_obj = pool.get('ir.actions.report.xml')
report_xml_ids = ir_obj.search(cursor, uid,
[('report_name', '=', self.name[7:])], context=context)
if report_xml_ids:
report_xml = ir_obj.browse(cursor,
uid,
report_xml_ids[0],
context=context)
report_xml.report_rml = None
report_xml.report_rml_content = None
report_xml.report_sxw_content_data = None
report_xml.report_sxw_content = None
report_xml.report_sxw = None
else:
return super(WebKitParser, self).create(cursor, uid, ids, data, context)
if report_xml.report_type != 'webkit':
return super(WebKitParser, self).create(cursor, uid, ids, data, context)
result = self.create_source_pdf(cursor, uid, ids, data, report_xml, context)
if not result:
return (False,False)
return result
def _sanitize_html(self, html):
"""wkhtmltopdf expects the html page to declare a doctype.
"""
if html and html[:9].upper() != "<!DOCTYPE":
html = "<!DOCTYPE html>\n" + html
return html
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -9,038,899,613,574,091,000 | 7,476,603,022,054,457,000 | 43.650146 | 131 | 0.543715 | false |
Hoekz/hackness-monster
|
venv/lib/python2.7/site-packages/werkzeug/routing.py
|
174
|
66350
|
# -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
    Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
    By default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
    If matching succeeded but the URL rule was incompatible with the given
    method (for example there were only rules for `GET` and `HEAD` and the
    routing system tried to match a `POST` request) a `MethodNotAllowed`
    exception is raised.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import difflib
import re
import uuid
import posixpath
from pprint import pformat
from threading import Lock
from werkzeug.urls import url_encode, url_quote, url_join
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed, \
BadHost
from werkzeug._internal import _get_environ, _encode_idna
from werkzeug._compat import itervalues, iteritems, to_unicode, to_bytes, \
text_type, string_types, native_string_result, \
implements_to_string, wsgi_decoding_dance
from werkzeug.datastructures import ImmutableDict, MultiDict
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
_simple_rule_re = re.compile(r'<([^>]+)>')
_converter_args_re = re.compile(r'''
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
\w+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
''', re.VERBOSE | re.UNICODE)
_PYTHON_CONSTANTS = {
'None': None,
'True': True,
'False': False
}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in '"\'':
value = value[1:-1]
return text_type(value)
def parse_converter_args(argstr):
argstr += ','
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group('stringval')
if value is None:
value = item.group('value')
value = _pythonize(value)
if not item.group('name'):
args.append(value)
else:
name = item.group('name')
kwargs[name] = value
return tuple(args), kwargs
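# Illustrative results (assumption, not from the original source):
#
#     parse_converter_args("length=2")   # -> ((), {'length': 2})
#     parse_converter_args("4, 6")       # -> ((4, 6), {})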
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data['static']:
yield None, None, data['static']
variable = data['variable']
converter = data['converter'] or 'default'
if variable in used_names:
raise ValueError('variable name %r used twice.' % variable)
used_names.add(variable)
yield converter, data['args'] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if '>' in remaining or '<' in remaining:
raise ValueError('malformed url rule: %r' % rule)
yield None, None, remaining
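# Illustrative result (assumption, not from the original source):
#
#     list(parse_rule('/page/<int:id>'))
#     # -> [(None, None, '/page/'), ('int', None, 'id')]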
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
    `strict_slashes` are activated and a url that requires a trailing slash was requested without one.
The attribute `new_url` contains the absolute destination url.
"""
code = 301
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException):
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method, adapter=None):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
self.suggested = self.closest_rule(adapter)
def closest_rule(self, adapter):
def score_rule(rule):
return sum([
0.98 * difflib.SequenceMatcher(
None, rule.endpoint, self.endpoint
).ratio(),
0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
0.01 * bool(rule.methods and self.method in rule.methods)
])
if adapter and adapter.map._rules:
return max(adapter.map._rules, key=score_rule)
else:
return None
def __str__(self):
message = []
message.append("Could not build url for endpoint %r" % self.endpoint)
if self.method:
message.append(" (%r)" % self.method)
if self.values:
message.append(" with values %r" % sorted(self.values.keys()))
message.append(".")
if self.suggested:
if self.endpoint == self.suggested.endpoint:
if self.method and self.method not in self.suggested.methods:
message.append(" Did you mean to use methods %r?" % sorted(
self.suggested.methods
))
missing_values = self.suggested.arguments.union(
set(self.suggested.defaults or ())
) - set(self.values.keys())
if missing_values:
message.append(
" Did you forget to specify values %r?" %
sorted(missing_values)
)
else:
message.append(
" Did you mean %r instead?" % self.suggested.endpoint
)
return "".join(message)
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here a small example for such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in iteritems(rule.defaults):
if isinstance(value, string_types):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, string_types):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes
)
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
Set this to True and the rule will never match but will create a URL
that can be build. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data)
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(self, string, defaults=None, subdomain=None, methods=None,
build_only=False, endpoint=None, strict_slashes=None,
redirect_to=None, alias=False, host=None):
if not string.startswith('/'):
raise ValueError('urls must start with a leading slash')
self.rule = string
self.is_leaf = not string.endswith('/')
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
self.methods = set([x.upper() for x in methods])
if 'HEAD' not in self.methods and 'GET' in self.methods:
self.methods.add('HEAD')
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._weights = None
def empty(self):
"""
Return an unbound copy of this rule.
        This can be useful if you want to reuse an already bound URL for another
map. See ``get_empty_kwargs`` to override what keyword arguments are
provided to the new copy.
"""
return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self):
"""
Provides kwargs for instantiating empty copy with empty()
Use this method to provide custom keyword arguments to the subclass of
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
has custom keyword arguments that are needed at instantiation.
Must return a ``dict`` that will be provided as kwargs to the new
instance of ``Rule``, following the initial ``self.rule`` value which
is always provided as the first, required positional argument.
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return dict(defaults=defaults, subdomain=self.subdomain,
methods=self.methods, build_only=self.build_only,
endpoint=self.endpoint, strict_slashes=self.strict_slashes,
redirect_to=self.redirect_to, alias=self.alias,
host=self.host)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError('url rule %r already bound to map %r' %
(self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
if converter_name not in self.map.converters:
raise LookupError('the converter %r does not exist' % converter_name)
return self.map.converters[converter_name](self.map, *args, **kwargs)
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, 'rule not bound'
if self.map.host_matching:
domain_rule = self.host or ''
else:
domain_rule = self.subdomain or ''
self._trace = []
self._converters = {}
self._weights = []
regex_parts = []
def _build_regex(rule):
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split('/'):
if part:
self._weights.append((0, -len(part)))
else:
if arguments:
c_args, c_kwargs = parse_converter_args(arguments)
else:
c_args = ()
c_kwargs = {}
convobj = self.get_converter(
variable, converter, c_args, c_kwargs)
regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._weights.append((1, convobj.weight))
self.arguments.add(str(variable))
_build_regex(domain_rule)
regex_parts.append('\\|')
self._trace.append((False, '|'))
_build_regex(self.is_leaf and self.rule or self.rule.rstrip('/'))
if not self.is_leaf:
self._trace.append((False, '/'))
if self.build_only:
return
regex = r'^%s%s$' % (
u''.join(regex_parts),
(not self.is_leaf or not self.strict_slashes) and
'(?<!/)(?P<__suffix__>/?)' or ''
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path(method)"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if self.strict_slashes and not self.is_leaf and \
not groups.pop('__suffix__'):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups['__suffix__']
result = {}
for name, value in iteritems(groups):
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reasons `None` is returned.
:internal:
"""
tmp = []
add = tmp.append
processed = set(self.arguments)
for is_dynamic, data in self._trace:
if is_dynamic:
try:
add(self._converters[data].to_url(values[data]))
except ValidationError:
return
processed.add(data)
else:
add(url_quote(to_bytes(data, self.map.charset), safe='/:|+'))
domain_part, url = (u''.join(tmp)).split(u'|', 1)
if append_unknown:
query_vars = MultiDict(values)
for key in processed:
if key in query_vars:
del query_vars[key]
if query_vars:
url += u'?' + url_encode(query_vars, charset=self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key)
return domain_part, url
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return not self.build_only and self.defaults and \
self.endpoint == rule.endpoint and self != rule and \
self.arguments == rule.arguments
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if method is not None and self.methods is not None \
and method not in self.methods:
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
        # in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. The more complex rules come first so the second argument is the
negative length of the number of weights.
3. lastly we order by the actual weights.
:internal:
"""
return bool(self.arguments), -len(self._weights), self._weights
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return self.alias and 1 or 0, -len(self.arguments), \
-len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self._trace == other._trace
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.rule
@native_string_result
def __repr__(self):
if self.map is None:
return u'<%s (unbound)>' % self.__class__.__name__
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append(u'<%s>' % data)
else:
tmp.append(data)
return u'<%s %s%s -> %s>' % (
self.__class__.__name__,
repr((u''.join(tmp)).lstrip(u'|')).lstrip(u'u'),
self.methods is not None
and u' (%s)' % u', '.join(self.methods)
or u'',
self.endpoint
)
class BaseConverter(object):
"""Base class for all converters."""
regex = '[^/]+'
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
return url_quote(value, charset=self.map.charset)
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
or equal 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = '{%d}' % int(length)
else:
if maxlength is None:
maxlength = ''
else:
maxlength = int(maxlength)
length = '{%s,%s}' % (
int(minlength),
maxlength
)
self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = '[^/].*?'
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None):
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
def to_python(self, value):
if (self.fixed_digits and len(value) != self.fixed_digits):
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or \
(self.max is not None and value > self.max):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ('%%0%sd' % self.fixed_digits) % value
return str(value)
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule('/page/<int:page>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param fixed_digits: the number of fixed digits in the URL. If you set
this to ``4`` for example, the application will
only match if the url looks like ``/0001/``. The
default is variable length.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+'
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule('/probability/<float:probability>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+\.\d+'
num_convert = float
def __init__(self, map, min=None, max=None):
NumberConverter.__init__(self, map, 0, min, max)
class UUIDConverter(BaseConverter):
"""This converter only accepts UUID strings::
Rule('/object/<uuid:identifier>')
.. versionadded:: 0.10
:param map: the :class:`Map`.
"""
regex = r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-' \
r'[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}'
def to_python(self, value):
return uuid.UUID(value)
def to_url(self, value):
return str(value)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
'default': UnicodeConverter,
'string': UnicodeConverter,
'any': AnyConverter,
'path': PathConverter,
'int': IntegerConverter,
'float': FloatConverter,
'uuid': UUIDConverter,
}
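# --- Added illustrative sketch (not part of the original module) ---
# The converters above all follow the BaseConverter interface: a `regex`
# used for matching plus `to_python`/`to_url` for converting between URL
# text and Python values.  A minimal custom converter could look like the
# hypothetical `HexConverter` below; per the `Map` docstring it could be
# registered with ``Map(..., converters={'hex': HexConverter})``.
class HexConverter(BaseConverter):
    """Example converter matching lowercase hex strings, e.g.
    ``Rule('/color/<hex:value>')``.  Name and usage are illustrative only.
    """
    regex = '[0-9a-f]+'
    def to_python(self, value):
        # expose the matched hex string to the view as an integer
        return int(value, 16)
    def to_url(self, value):
        # convert back to the lowercase hex form used in the URL
        return '%x' % value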
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
    .. versionadded:: 0.5
       `sort_parameters` and `sort_key` were added.
    .. versionadded:: 0.7
       `encoding_errors` and `host_matching` were added.
"""
#: .. versionadded:: 0.6
#: a dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors='replace', host_matching=False):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = Lock()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(self, server_name, script_name=None, subdomain=None,
url_scheme='http', default_method='GET', path_info=None,
query_args=None):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
        not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError('host matching enabled and a '
'subdomain was provided')
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = '/'
try:
server_name = _encode_idna(server_name)
except UnicodeError:
raise BadHost()
return MapAdapter(self, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as the `server_name`, with the subdomain
        feature disabled.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
        objects. Additionally `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
if 'HTTP_HOST' in environ:
wsgi_server_name = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' \
and wsgi_server_name.endswith(':80'):
wsgi_server_name = wsgi_server_name[:-3]
elif environ['wsgi.url_scheme'] == 'https' \
and wsgi_server_name.endswith(':443'):
wsgi_server_name = wsgi_server_name[:-4]
else:
wsgi_server_name = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + environ['SERVER_PORT']
wsgi_server_name = wsgi_server_name.lower()
if server_name is None:
server_name = wsgi_server_name
else:
server_name = server_name.lower()
if subdomain is None and not self.host_matching:
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, self.charset)
script_name = _get_wsgi_string('SCRIPT_NAME')
path_info = _get_wsgi_string('PATH_INFO')
query_args = _get_wsgi_string('QUERY_STRING')
return Map.bind(self, server_name, script_name,
subdomain, environ['wsgi.url_scheme'],
environ['REQUEST_METHOD'], path_info,
query_args=query_args)
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if not self._remap:
return
with self._remap_lock:
if not self._remap:
return
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in itervalues(self._rules_by_endpoint):
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return '%s(%s)' % (self.__class__.__name__, pformat(list(rules)))
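# --- Added illustrative sketch (not part of the original module) ---
# Ties together ``bind`` and ``bind_to_environ`` as described in the
# docstrings above.  The host names and the WSGI environ are hypothetical
# example values; the helper is not called anywhere in the module.
def _example_binding():
    m = Map([Rule('/', endpoint='index')])
    # plain bind, as in the ``match`` docstring
    assert m.bind('example.com', '/').match('/') == ('index', {})
    # per the ``bind_to_environ`` docstring: server_name 'example.com' with an
    # HTTP_HOST of 'staging.dev.example.com' yields the subdomain 'staging.dev'
    environ = {'HTTP_HOST': 'staging.dev.example.com',
               'SERVER_NAME': 'staging.dev.example.com', 'SERVER_PORT': '80',
               'wsgi.url_scheme': 'http', 'REQUEST_METHOD': 'GET',
               'SCRIPT_NAME': '', 'PATH_INFO': '/', 'QUERY_STRING': ''}
    adapter = m.bind_to_environ(environ, server_name='example.com')
    assert adapter.subdomain == 'staging.dev'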
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(self, map, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args=None):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u'/'):
script_name += u'/'
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
def dispatch(self, view_func, path_info=None, method=None,
catch_http_exceptions=False):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
        Here is a small example for the dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False,
query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
matching. A `NotFound` exception is also a WSGI application you
can call to get a default page not found page (happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
          attribute. This exception is used to notify you about a redirect
          Werkzeug requests from your WSGI application. This is for example the
          case if you request ``/foo`` although the correct URL is ``/foo/``.
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
        can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
else:
path_info = to_unicode(path_info, self.map.charset)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u'%s|%s' % (
self.map.host_matching and self.server_name or self.subdomain,
path_info and '/%s' % path_info.lstrip('/')
)
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path)
except RequestSlash:
raise RequestRedirect(self.make_redirect_url(
url_quote(path_info, self.map.charset,
safe='/:|+') + '/', query_args))
except RequestAliasRedirect as e:
raise RequestRedirect(self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args))
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv,
query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, string_types):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match,
rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(str(url_join('%s://%s%s%s' % (
self.url_scheme or 'http',
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name
), redirect_url)))
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
        if the URL matches, or `False` if it does not match.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method='--')
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException as e:
pass
return []
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain in case host matching is disabled or
a full host name.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, 'ascii')
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, 'ascii')
return (subdomain and subdomain + u'.' or u'') + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
# every rule that comes after this one, including ourself
# has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and \
r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(
path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ''
if query_args:
suffix = '?' + self.encode_query_args(query_args)
return str('%s://%s/%s%s' % (
self.url_scheme or 'http',
self.get_host(domain_part),
posixpath.join(self.script_name[:-1].lstrip('/'),
path_info.lstrip('/')),
suffix
))
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(endpoint, values, method, append_unknown=False,
force_external=True)
if query_args:
url += '?' + self.encode_query_args(query_args)
assert url != path, 'detected invalid alias setting. No canonical ' \
'URL found'
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(self, endpoint, values=None, method=None, force_external=False,
append_unknown=True):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
        which, if you set it to `True`, will force external URLs. By default
        external URLs (including the server name) will only be used if the
        target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non ASCII data you will always get
bytestrings back. Non ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
        If a rule does not exist when building, a `BuildError` exception is
        raised.
        The build method accepts an argument called `method` which allows you
        to specify the method you want to have a URL built for if you have
        different methods specified for the same endpoint.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
valueiter = iteritems(values, multi=True)
else:
valueiter = iteritems(values)
values = dict((k, v) for k, v in valueiter if v is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name) or
(not self.map.host_matching and domain_part == self.subdomain)
):
return str(url_join(self.script_name, './' + path.lstrip('/')))
return str('%s//%s%s/%s' % (
self.url_scheme + ':' if self.url_scheme else '',
host,
self.script_name[:-1],
path.lstrip('/')
))
|
mit
| -8,969,963,249,151,519,000 | 8,739,589,139,706,738,000 | 36.401353 | 82 | 0.577815 | false |
Qalthos/ansible
|
test/units/plugins/connection/test_winrm.py
|
47
|
15343
|
# -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from io import StringIO
from units.compat.mock import patch, MagicMock
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
from ansible.plugins.connection import winrm
pytest.importorskip("winrm")
class TestConnectionWinRM(object):
OPTIONS_DATA = (
# default options
(
{},
{'_extras': {}},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_connection_timeout': None,
'_winrm_host': 'inventory_hostname',
'_winrm_kwargs': {'username': None, 'password': ''},
'_winrm_pass': '',
'_winrm_path': '/wsman',
'_winrm_port': 5986,
'_winrm_scheme': 'https',
'_winrm_transport': ['ssl'],
'_winrm_user': None
},
False
),
# http through port
(
{},
{'_extras': {}, 'ansible_port': 5985},
{},
{
'_winrm_kwargs': {'username': None, 'password': ''},
'_winrm_port': 5985,
'_winrm_scheme': 'http',
'_winrm_transport': ['plaintext'],
},
False
),
# kerberos user with kerb present
(
{},
{'_extras': {}, 'ansible_user': '[email protected]'},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': '[email protected]',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['kerberos', 'ssl'],
'_winrm_user': '[email protected]'
},
True
),
# kerberos user without kerb present
(
{},
{'_extras': {}, 'ansible_user': '[email protected]'},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': '[email protected]',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['ssl'],
'_winrm_user': '[email protected]'
},
False
),
# kerberos user with managed ticket (implicit)
(
{'password': 'pass'},
{'_extras': {}, 'ansible_user': '[email protected]'},
{},
{
'_kerb_managed': True,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': '[email protected]',
'password': 'pass'},
'_winrm_pass': 'pass',
'_winrm_transport': ['kerberos', 'ssl'],
'_winrm_user': '[email protected]'
},
True
),
# kerb with managed ticket (explicit)
(
{'password': 'pass'},
{'_extras': {}, 'ansible_user': '[email protected]',
'ansible_winrm_kinit_mode': 'managed'},
{},
{
'_kerb_managed': True,
},
True
),
# kerb with unmanaged ticket (explicit))
(
{'password': 'pass'},
{'_extras': {}, 'ansible_user': '[email protected]',
'ansible_winrm_kinit_mode': 'manual'},
{},
{
'_kerb_managed': False,
},
True
),
# transport override (single)
(
{},
{'_extras': {}, 'ansible_user': '[email protected]',
'ansible_winrm_transport': 'ntlm'},
{},
{
'_winrm_kwargs': {'username': '[email protected]',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['ntlm'],
},
False
),
# transport override (list)
(
{},
{'_extras': {}, 'ansible_user': '[email protected]',
'ansible_winrm_transport': ['ntlm', 'certificate']},
{},
{
'_winrm_kwargs': {'username': '[email protected]',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['ntlm', 'certificate'],
},
False
),
# winrm extras
(
{},
{'_extras': {'ansible_winrm_server_cert_validation': 'ignore',
'ansible_winrm_service': 'WSMAN'}},
{},
{
'_winrm_kwargs': {'username': None, 'password': '',
'server_cert_validation': 'ignore',
'service': 'WSMAN'},
},
False
),
# direct override
(
{},
{'_extras': {}, 'ansible_winrm_connection_timeout': 5},
{'connection_timeout': 10},
{
'_winrm_connection_timeout': 10,
},
False
),
# user comes from option not play context
(
{'username': 'user1'},
{'_extras': {}, 'ansible_user': 'user2'},
{},
{
'_winrm_user': 'user2',
'_winrm_kwargs': {'username': 'user2', 'password': ''}
},
False
)
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('play, options, direct, expected, kerb',
((p, o, d, e, k) for p, o, d, e, k in OPTIONS_DATA))
def test_set_options(self, play, options, direct, expected, kerb):
winrm.HAVE_KERBEROS = kerb
pc = PlayContext()
for attr, value in play.items():
setattr(pc, attr, value)
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options, direct=direct)
conn._build_winrm_kwargs()
for attr, expected in expected.items():
actual = getattr(conn, attr)
assert actual == expected, \
"winrm attr '%s', actual '%s' != expected '%s'"\
% (attr, actual, expected)
class TestWinRMKerbAuth(object):
@pytest.mark.parametrize('options, expected', [
[{"_extras": {}},
(["kinit", "user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
(["kinit2", "user@domain"],)],
[{"_extras": {'ansible_winrm_kerberos_delegation': True}},
(["kinit", "-f", "user@domain"],)],
])
def test_kinit_success_subprocess(self, monkeypatch, options, expected):
def mock_communicate(input=None, timeout=None):
return b"", b""
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 0
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
conn._kerb_auth("user@domain", "pass")
mock_calls = mock_popen.mock_calls
assert len(mock_calls) == 1
assert mock_calls[0][1] == expected
actual_env = mock_calls[0][2]['env']
assert list(actual_env.keys()) == ['KRB5CCNAME']
assert actual_env['KRB5CCNAME'].startswith("FILE:/")
@pytest.mark.parametrize('options, expected', [
[{"_extras": {}},
("kinit", ["user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
("kinit2", ["user@domain"],)],
[{"_extras": {'ansible_winrm_kerberos_delegation': True}},
("kinit", ["-f", "user@domain"],)],
])
def test_kinit_success_pexpect(self, monkeypatch, options, expected):
pytest.importorskip("pexpect")
mock_pexpect = MagicMock()
mock_pexpect.return_value.exitstatus = 0
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
conn._kerb_auth("user@domain", "pass")
mock_calls = mock_pexpect.mock_calls
assert mock_calls[0][1] == expected
actual_env = mock_calls[0][2]['env']
assert list(actual_env.keys()) == ['KRB5CCNAME']
assert actual_env['KRB5CCNAME'].startswith("FILE:/")
assert mock_calls[0][2]['echo'] is False
assert mock_calls[1][0] == "().expect"
assert mock_calls[1][1] == (".*:",)
assert mock_calls[2][0] == "().sendline"
assert mock_calls[2][1] == ("pass",)
assert mock_calls[3][0] == "().read"
assert mock_calls[4][0] == "().wait"
def test_kinit_with_missing_executable_subprocess(self, monkeypatch):
expected_err = "[Errno 2] No such file or directory: " \
"'/fake/kinit': '/fake/kinit'"
mock_popen = MagicMock(side_effect=OSError(expected_err))
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("user@domain", "pass")
assert str(err.value) == "Kerberos auth failure when calling " \
"kinit cmd '/fake/kinit': %s" % expected_err
def test_kinit_with_missing_executable_pexpect(self, monkeypatch):
pexpect = pytest.importorskip("pexpect")
expected_err = "The command was not found or was not " \
"executable: /fake/kinit"
mock_pexpect = \
MagicMock(side_effect=pexpect.ExceptionPexpect(expected_err))
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("user@domain", "pass")
assert str(err.value) == "Kerberos auth failure when calling " \
"kinit cmd '/fake/kinit': %s" % expected_err
def test_kinit_error_subprocess(self, monkeypatch):
expected_err = "kinit: krb5_parse_name: " \
"Configuration file does not specify default realm"
def mock_communicate(input=None, timeout=None):
return b"", to_bytes(expected_err)
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 1
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("invaliduser", "pass")
assert str(err.value) == \
"Kerberos auth failure for principal invaliduser with " \
"subprocess: %s" % (expected_err)
def test_kinit_error_pexpect(self, monkeypatch):
pytest.importorskip("pexpect")
expected_err = "Configuration file does not specify default realm"
mock_pexpect = MagicMock()
mock_pexpect.return_value.expect = MagicMock(side_effect=OSError)
mock_pexpect.return_value.read.return_value = to_bytes(expected_err)
mock_pexpect.return_value.exitstatus = 1
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("invaliduser", "pass")
assert str(err.value) == \
"Kerberos auth failure for principal invaliduser with " \
"pexpect: %s" % (expected_err)
def test_kinit_error_pass_in_output_subprocess(self, monkeypatch):
def mock_communicate(input=None, timeout=None):
return b"", b"Error with kinit\n" + input
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 1
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("username", "password")
assert str(err.value) == \
"Kerberos auth failure for principal username with subprocess: " \
"Error with kinit\n<redacted>"
def test_kinit_error_pass_in_output_pexpect(self, monkeypatch):
pytest.importorskip("pexpect")
mock_pexpect = MagicMock()
mock_pexpect.return_value.expect = MagicMock()
mock_pexpect.return_value.read.return_value = \
b"Error with kinit\npassword\n"
mock_pexpect.return_value.exitstatus = 1
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
        pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("username", "password")
assert str(err.value) == \
"Kerberos auth failure for principal username with pexpect: " \
"Error with kinit\n<redacted>"
|
gpl-3.0
| 3,704,838,036,859,389,400 | -1,212,130,554,363,657,500 | 35.186321 | 92 | 0.51763 | false |
JCROM-Android/jcrom_external_chromium_org
|
third_party/tlslite/tlslite/utils/rijndael.py
|
359
|
11341
|
"""
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, [email protected], April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
#-----------------------
#TREV - ADDED BECAUSE THERE'S WARNINGS ABOUT INT OVERFLOW BEHAVIOR CHANGING IN
#2.4.....
import os
if os.name != "java":
import exceptions
if hasattr(exceptions, "FutureWarning"):
import warnings
warnings.filterwarnings("ignore", category=FutureWarning, append=1)
#-----------------------
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
[[0, 0], [1, 5], [2, 4], [3, 3]],
[[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in xrange(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in xrange(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in xrange(256)]
box[1][7] = 1
for i in xrange(2, 256):
j = alog[255 - log[i]]
for t in xrange(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in xrange(256)]
for i in xrange(256):
for t in xrange(8):
cox[i][t] = B[t]
for j in xrange(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in xrange(256):
S[i] = cox[i][0] << 7
for t in xrange(1, 8):
S[i] ^= cox[i][t] << (7-t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
AA[i][j] = G[i][j]
AA[i][i+4] = 1
for i in xrange(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while AA[t][i] == 0 and t < 4:
t += 1
assert t != 4, 'G matrix must be invertible'
for j in xrange(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in xrange(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
for t in xrange(4):
if i != t:
for j in xrange(i+1, 8):
AA[t][j] ^= mul(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r = r | mul(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in xrange(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in xrange(1, 30):
r = mul(2, r)
rcon.append(r)
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
def __init__(self, key, block_size = 16):
if block_size != 16 and block_size != 24 and block_size != 32:
raise ValueError('Invalid block size: ' + str(block_size))
if len(key) != 16 and len(key) != 24 and len(key) != 32:
raise ValueError('Invalid key size: ' + str(len(key)))
self.block_size = block_size
ROUNDS = num_rounds[len(key)][block_size]
BC = block_size / 4
# encryption round keys
Ke = [[0] * BC for i in xrange(ROUNDS + 1)]
# decryption round keys
Kd = [[0] * BC for i in xrange(ROUNDS + 1)]
ROUND_KEY_COUNT = (ROUNDS + 1) * BC
KC = len(key) / 4
# copy user material bytes into temporary ints
tk = []
for i in xrange(0, KC):
tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
(ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
# copy values into round key arrays
t = 0
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t / BC][t % BC] = tk[j]
Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
j += 1
t += 1
tt = 0
rconpointer = 0
while t < ROUND_KEY_COUNT:
# extrapolate using phi (the round key evolution function)
tt = tk[KC - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[ tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
if KC != 8:
for i in xrange(1, KC):
tk[i] ^= tk[i-1]
else:
for i in xrange(1, KC / 2):
tk[i] ^= tk[i-1]
tt = tk[KC / 2 - 1]
tk[KC / 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) << 24
for i in xrange(KC / 2 + 1, KC):
tk[i] ^= tk[i-1]
# copy values into round key arrays
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t / BC][t % BC] = tk[j]
Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in xrange(1, ROUNDS):
for j in xrange(BC):
tt = Kd[r][j]
Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ \
U4[ tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def encrypt(self, plaintext):
if len(plaintext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
Ke = self.Ke
BC = self.block_size / 4
ROUNDS = len(Ke) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][0]
s2 = shifts[SC][2][0]
s3 = shifts[SC][3][0]
a = [0] * BC
# temporary work array
t = []
# plaintext to ints + key
for i in xrange(BC):
t.append((ord(plaintext[i * 4 ]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Ke[ROUNDS][i]
result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def decrypt(self, ciphertext):
if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
Kd = self.Kd
BC = self.block_size / 4
ROUNDS = len(Kd) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][1]
s2 = shifts[SC][2][1]
s3 = shifts[SC][3][1]
a = [0] * BC
# temporary work array
t = [0] * BC
# ciphertext to ints + key
for i in xrange(BC):
t[i] = (ord(ciphertext[i * 4 ]) << 24 |
ord(ciphertext[i * 4 + 1]) << 16 |
ord(ciphertext[i * 4 + 2]) << 8 |
ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Kd[ROUNDS][i]
result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def encrypt(key, block):
return rijndael(key, len(block)).encrypt(block)
def decrypt(key, block):
return rijndael(key, len(block)).decrypt(block)
def test():
def t(kl, bl):
b = 'b' * bl
r = rijndael('a' * kl, bl)
assert r.decrypt(r.encrypt(b)) == b
t(16, 16)
t(16, 24)
t(16, 32)
t(24, 16)
t(24, 24)
t(24, 32)
t(32, 16)
t(32, 24)
t(32, 32)
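# --- Added illustrative usage sketch (not part of the reference code) ---
# Mirrors the module docstring: the key and the block must each be 16, 24 or
# 32 bytes long; the key and plaintext below are arbitrary example values.
if __name__ == '__main__':
    r = rijndael('0123456789abcdef', block_size=16)
    block = 'sixteen byte blk'
    assert r.decrypt(r.encrypt(block)) == block
    test()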
|
bsd-3-clause
| 7,049,528,307,011,158,000 | 6,672,590,560,681,708,000 | 27.931122 | 116 | 0.432854 | false |
great-expectations/great_expectations
|
tests/datasource/test_batch_generators.py
|
1
|
6706
|
import os
from great_expectations.datasource.batch_kwargs_generator import (
DatabricksTableBatchKwargsGenerator,
GlobReaderBatchKwargsGenerator,
SubdirReaderBatchKwargsGenerator,
)
try:
from unittest import mock
except ImportError:
    import mock
def test_file_kwargs_generator(
data_context_parameterized_expectation_suite, filesystem_csv
):
base_dir = filesystem_csv
datasource = data_context_parameterized_expectation_suite.add_datasource(
"default",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": str(base_dir),
}
},
)
generator = datasource.get_batch_kwargs_generator("subdir_reader")
known_data_asset_names = datasource.get_available_data_asset_names()
# Use set to avoid order dependency
assert set(known_data_asset_names["subdir_reader"]["names"]) == {
("f1", "file"),
("f2", "file"),
("f3", "directory"),
}
f1_batches = [
batch_kwargs["path"]
for batch_kwargs in generator.get_iterator(data_asset_name="f1")
]
assert len(f1_batches) == 1
expected_batches = [{"path": os.path.join(base_dir, "f1.csv")}]
for batch in expected_batches:
assert batch["path"] in f1_batches
f3_batches = [
batch_kwargs["path"]
for batch_kwargs in generator.get_iterator(data_asset_name="f3")
]
assert len(f3_batches) == 2
expected_batches = [
{"path": os.path.join(base_dir, "f3", "f3_20190101.csv")},
{"path": os.path.join(base_dir, "f3", "f3_20190102.csv")},
]
for batch in expected_batches:
assert batch["path"] in f3_batches
def test_glob_reader_generator(basic_pandas_datasource, tmp_path_factory):
"""Provides an example of how glob generator works: we specify our own
names for data_assets, and an associated glob; the generator
will take care of providing batches consisting of one file per
batch corresponding to the glob."""
basedir = str(tmp_path_factory.mktemp("test_glob_reader_generator"))
with open(os.path.join(basedir, "f1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f2.csv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f3.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f4.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f5.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f6.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f7.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f8.parquet"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f9.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f0.json"), "w") as outfile:
outfile.write("\n\n\n")
g2 = GlobReaderBatchKwargsGenerator(
base_directory=basedir,
datasource=basic_pandas_datasource,
asset_globs={"blargs": {"glob": "*.blarg"}, "fs": {"glob": "f*"}},
)
g2_assets = g2.get_available_data_asset_names()
# Use set in test to avoid order issues
assert set(g2_assets["names"]) == {("blargs", "path"), ("fs", "path")}
blargs_kwargs = [x["path"] for x in g2.get_iterator(data_asset_name="blargs")]
real_blargs = [
os.path.join(basedir, "f1.blarg"),
os.path.join(basedir, "f3.blarg"),
os.path.join(basedir, "f4.blarg"),
os.path.join(basedir, "f5.blarg"),
os.path.join(basedir, "f6.blarg"),
]
for kwargs in real_blargs:
assert kwargs in blargs_kwargs
assert len(blargs_kwargs) == len(real_blargs)
def test_file_kwargs_generator_extensions(tmp_path_factory):
"""csv, xls, parquet, json should be recognized file extensions"""
basedir = str(tmp_path_factory.mktemp("test_file_kwargs_generator_extensions"))
# Do not include: invalid extension
with open(os.path.join(basedir, "f1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
# Include
with open(os.path.join(basedir, "f2.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Do not include: valid subdir, but no valid files in it
os.mkdir(os.path.join(basedir, "f3"))
with open(os.path.join(basedir, "f3", "f3_1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f3", "f3_2.blarg"), "w") as outfile:
outfile.write("\n\n\n")
# Include: valid subdir with valid files
os.mkdir(os.path.join(basedir, "f4"))
with open(os.path.join(basedir, "f4", "f4_1.csv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f4", "f4_2.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Do not include: valid extension, but dot prefix
with open(os.path.join(basedir, ".f5.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Include: valid extensions
with open(os.path.join(basedir, "f6.tsv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f7.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f8.parquet"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f9.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f0.json"), "w") as outfile:
outfile.write("\n\n\n")
g1 = SubdirReaderBatchKwargsGenerator(datasource="foo", base_directory=basedir)
g1_assets = g1.get_available_data_asset_names()
# Use set in test to avoid order issues
assert set(g1_assets["names"]) == {
("f7", "file"),
("f4", "directory"),
("f6", "file"),
("f0", "file"),
("f2", "file"),
("f9", "file"),
("f8", "file"),
}
def test_databricks_generator(basic_sparkdf_datasource):
generator = DatabricksTableBatchKwargsGenerator(datasource=basic_sparkdf_datasource)
available_assets = generator.get_available_data_asset_names()
# We have no tables available
assert available_assets == {"names": []}
databricks_kwargs_iterator = generator.get_iterator(data_asset_name="foo")
kwargs = [batch_kwargs for batch_kwargs in databricks_kwargs_iterator]
assert "select * from" in kwargs[0]["query"].lower()
|
apache-2.0
| 8,615,507,957,590,099,000 | 9,188,894,421,155,249,000 | 36.463687 | 88 | 0.621384 | false |
thumbimigwe/echorizr
|
lib/python2.7/site-packages/django/template/loaders/cached.py
|
19
|
7094
|
"""
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
import warnings
from django.template import Origin, Template, TemplateDoesNotExist
from django.template.backends.django import copy_exception
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
from django.utils.inspect import func_supports_parameter
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, loaders):
self.template_cache = {}
self.find_template_cache = {} # RemovedInDjango20Warning
self.get_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super(Loader, self).__init__(engine)
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template(self, template_name, template_dirs=None, skip=None):
"""
Perform the caching that gives this loader its name. Often many of the
templates attempted will be missing, so memory use is of concern here.
To keep it in check, caching behavior is a little complicated when a
template is not found. See ticket #26306 for more details.
With template debugging disabled, cache the TemplateDoesNotExist class
for every missing template and raise a new instance of it after
fetching it from the cache.
With template debugging enabled, a unique TemplateDoesNotExist object
is cached for each missing template to preserve debug data. When
raising an exception, Python sets __traceback__, __context__, and
__cause__ attributes on it. Those attributes can contain references to
all sorts of objects up the call chain and caching them creates a
memory leak. Thus, unraised copies of the exceptions are cached and
copies of those copies are raised after they're fetched from the cache.
"""
key = self.cache_key(template_name, template_dirs, skip)
cached = self.get_template_cache.get(key)
if cached:
if isinstance(cached, type) and issubclass(cached, TemplateDoesNotExist):
raise cached(template_name)
elif isinstance(cached, TemplateDoesNotExist):
raise copy_exception(cached)
return cached
try:
template = super(Loader, self).get_template(
template_name, template_dirs, skip,
)
except TemplateDoesNotExist as e:
self.get_template_cache[key] = copy_exception(e) if self.engine.debug else TemplateDoesNotExist
raise
else:
self.get_template_cache[key] = template
return template
def get_template_sources(self, template_name, template_dirs=None):
for loader in self.loaders:
args = [template_name]
# RemovedInDjango20Warning: Add template_dirs for compatibility
# with old loaders
if func_supports_parameter(loader.get_template_sources, 'template_dirs'):
args.append(template_dirs)
for origin in loader.get_template_sources(*args):
yield origin
def cache_key(self, template_name, template_dirs, skip=None):
"""
Generate a cache key for the template name, dirs, and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
dirs_prefix = ''
skip_prefix = ''
if skip:
matching = [origin.name for origin in skip if origin.template_name == template_name]
if matching:
skip_prefix = self.generate_hash(matching)
if template_dirs:
dirs_prefix = self.generate_hash(template_dirs)
return ("%s-%s-%s" % (template_name, skip_prefix, dirs_prefix)).strip('-')
def generate_hash(self, values):
return hashlib.sha1(force_bytes('|'.join(values))).hexdigest()
@property
def supports_recursion(self):
"""
RemovedInDjango20Warning: This is an internal property used by the
ExtendsNode during the deprecation of non-recursive loaders.
"""
return all(hasattr(loader, 'get_contents') for loader in self.loaders)
def find_template(self, name, dirs=None):
"""
RemovedInDjango20Warning: An internal method to lookup the template
name in all the configured loaders.
"""
key = self.cache_key(name, dirs)
try:
result = self.find_template_cache[key]
except KeyError:
result = None
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
except TemplateDoesNotExist:
pass
else:
origin = Origin(
name=display_name,
template_name=name,
loader=loader,
)
result = template, origin
break
self.find_template_cache[key] = result
if result:
return result
else:
self.template_cache[key] = TemplateDoesNotExist
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
warnings.warn(
'The load_template() method is deprecated. Use get_template() '
'instead.', RemovedInDjango20Warning,
)
key = self.cache_key(template_name, template_dirs)
template_tuple = self.template_cache.get(key)
# A cached previous failure:
if template_tuple is TemplateDoesNotExist:
raise TemplateDoesNotExist(template_name)
elif template_tuple is None:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = Template(template, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
self.template_cache[key] = (template, origin)
self.template_cache[key] = (template, None)
return self.template_cache[key]
def reset(self):
"Empty the template cache."
self.template_cache.clear()
self.find_template_cache.clear() # RemovedInDjango20Warning
self.get_template_cache.clear()
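# --- Added illustrative sketch (not part of the original module) ---
# Typical configuration: the cached loader wraps the real loaders inside the
# TEMPLATES setting's OPTIONS so parsed templates are reused across requests.
# The dict below is a hypothetical settings fragment, not used by this module.
EXAMPLE_CACHED_LOADERS_OPTION = {
    'loaders': [
        ('django.template.loaders.cached.Loader', [
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        ]),
    ],
}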
|
mit
| 4,281,646,025,147,076,600 | 2,293,646,604,124,273,200 | 39.770115 | 107 | 0.619397 | false |
glls/Cinnamon
|
files/usr/share/cinnamon/cinnamon-settings/modules/cs_workspaces.py
|
3
|
2070
|
#!/usr/bin/python3
from SettingsWidgets import SidePage
from xapp.GSettingsWidgets import *
class Module:
name = "workspaces"
category = "prefs"
comment = _("Manage workspace preferences")
def __init__(self, content_box):
keywords = _("workspace, osd, expo, monitor")
sidePage = SidePage(_("Workspaces"), "cs-workspaces", keywords, content_box, module=self)
self.sidePage = sidePage
def shouldLoad(self):
return True
def on_module_selected(self):
if not self.loaded:
print("Loading Workspaces module")
page = SettingsPage()
self.sidePage.add_widget(page)
settings = page.add_section(_("Workspace Options"))
switch = GSettingsSwitch(_("Enable workspace OSD"), "org.cinnamon", "workspace-osd-visible")
settings.add_row(switch)
switch = GSettingsSwitch(_("Allow cycling through workspaces"), "org.cinnamon.muffin", "workspace-cycle")
settings.add_row(switch)
switch = GSettingsSwitch(_("Only use workspaces on primary monitor (requires Cinnamon restart)"), "org.cinnamon.muffin", "workspaces-only-on-primary")
settings.add_row(switch)
switch = GSettingsSwitch(_("Display Expo view as a grid"), "org.cinnamon", "workspace-expo-view-as-grid")
settings.add_row(switch)
# Edge Flip doesn't work well, so it's there in gsettings, but we don't show it to users yet
# switch = GSettingsSwitch(_("Enable Edge Flip"), "org.cinnamon", "enable-edge-flip")
# settings.add_row(switch)
# spin = GSettingsSpinButton(_("Edge Flip delay"), "org.cinnamon", "edge-flip-delay", mini=1, maxi=3000, units=_("ms"))
# settings.add_reveal_row(spin, "org.cinnamon", "enable-edge-flip")
switch = GSettingsSwitch(_("Invert the left and right arrow key directions used to shift workspaces during a window drag"), "org.cinnamon.muffin", "invert-workspace-flip-direction")
settings.add_row(switch)
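# A minimal sketch of toggling the same keys outside this UI, assuming the
# schemas referenced above are installed; the commands are illustrative only.
#
#   gsettings set org.cinnamon workspace-osd-visible true
#   gsettings set org.cinnamon.muffin workspace-cycle false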
|
gpl-2.0
| 7,300,961,723,665,278,000 | -2,768,561,795,648,544,300 | 42.125 | 193 | 0.638164 | false |
stephen144/odoo
|
addons/gamification/tests/test_challenge.py
|
39
|
4253
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.tests import common
class test_challenge(common.TransactionCase):
def setUp(self):
super(test_challenge, self).setUp()
cr, uid = self.cr, self.uid
self.data_obj = self.registry('ir.model.data')
self.user_obj = self.registry('res.users')
self.challenge_obj = self.registry('gamification.challenge')
self.line_obj = self.registry('gamification.challenge.line')
self.goal_obj = self.registry('gamification.goal')
self.badge_obj = self.registry('gamification.badge')
self.badge_user_obj = self.registry('gamification.badge.user')
self.demo_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'user_demo')[1]
self.group_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'group_user')[1]
self.challenge_base_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'challenge_base_discover')[1]
self.definition_timezone_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'definition_base_timezone')[1]
self.badge_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'badge_good_job')[1]
def test_00_join_challenge(self):
cr, uid, context = self.cr, self.uid, {}
user_ids = self.user_obj.search(cr, uid, [('groups_id', '=', self.group_user_id)])
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
self.assertGreaterEqual(len(challenge.user_ids), len(user_ids), "Not enough users in base challenge")
self.user_obj.create(cr, uid, {
'name': 'R2D2',
'login': '[email protected]',
'email': '[email protected]',
'groups_id': [(6, 0, [self.group_user_id])]
}, {'no_reset_password': True})
self.challenge_obj._update_all(cr, uid, [self.challenge_base_id], context=context)
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
self.assertGreaterEqual(len(challenge.user_ids), len(user_ids)+1, "These are not droids you are looking for")
def test_10_reach_challenge(self):
cr, uid, context = self.cr, self.uid, {}
self.challenge_obj.write(cr, uid, [self.challenge_base_id], {'state': 'inprogress'}, context=context)
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
challenge_user_ids = [user.id for user in challenge.user_ids]
self.assertEqual(challenge.state, 'inprogress', "Challenge failed the change of state")
line_ids = self.line_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id)], context=context)
goal_ids = self.goal_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id), ('state', '!=', 'draft')], context=context)
self.assertEqual(len(goal_ids), len(line_ids)*len(challenge_user_ids), "Incorrect number of goals generated, should be 1 goal per user, per challenge line")
# demo user will set a timezone
self.user_obj.write(cr, uid, self.demo_user_id, {'tz': "Europe/Brussels"}, context=context)
goal_ids = self.goal_obj.search(cr, uid, [('user_id', '=', self.demo_user_id), ('definition_id', '=', self.definition_timezone_id)], context=context)
self.goal_obj.update(cr, uid, goal_ids, context=context)
reached_goal_ids = self.goal_obj.search(cr, uid, [('id', 'in', goal_ids), ('state', '=', 'reached')], context=context)
self.assertEqual(set(goal_ids), set(reached_goal_ids), "Not every goal was reached after changing timezone")
# reward for two firsts as admin may have timezone
self.challenge_obj.write(cr, uid, self.challenge_base_id, {'reward_first_id': self.badge_id, 'reward_second_id': self.badge_id}, context=context)
self.challenge_obj.write(cr, uid, self.challenge_base_id, {'state': 'done'}, context=context)
badge_ids = self.badge_user_obj.search(cr, uid, [('badge_id', '=', self.badge_id), ('user_id', '=', self.demo_user_id)])
self.assertGreater(len(badge_ids), 0, "Demo user has not received the badge")
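# A hedged note on running these tests: they are designed for the OpenERP/Odoo
# test runner rather than direct execution; the invocation below is only an
# assumed example of such a run.
#
#   ./odoo.py -d <test_db> -u gamification --test-enable --stop-after-init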
|
agpl-3.0
| 7,477,389,525,400,835,000 | 5,525,725,783,862,529,000 | 58.083333 | 164 | 0.650129 | false |
ericmok/eri53
|
learn/libsvm-3.12/tools/checkdata.py
|
144
|
2479
|
#!/usr/bin/env python
#
# A format checker for LIBSVM
#
#
# Copyright (c) 2007, Rong-En Fan
#
# All rights reserved.
#
# This program is distributed under the same license of the LIBSVM package.
#
from sys import argv, exit
import os.path
def err(line_no, msg):
print("line {0}: {1}".format(line_no, msg))
# works like float() but does not accept nan and inf
def my_float(x):
if x.lower().find("nan") != -1 or x.lower().find("inf") != -1:
raise ValueError
return float(x)
def main():
if len(argv) != 2:
print("Usage: {0} dataset".format(argv[0]))
exit(1)
dataset = argv[1]
if not os.path.exists(dataset):
print("dataset {0} not found".format(dataset))
exit(1)
line_no = 1
error_line_count = 0
for line in open(dataset, 'r'):
line_error = False
# each line must end with a newline character
if line[-1] != '\n':
err(line_no, "missing a newline character in the end")
line_error = True
nodes = line.split()
# check label
try:
label = nodes.pop(0)
if label.find(',') != -1:
# multi-label format
try:
for l in label.split(','):
l = my_float(l)
except:
err(line_no, "label {0} is not a valid multi-label form".format(label))
line_error = True
else:
try:
label = my_float(label)
except:
err(line_no, "label {0} is not a number".format(label))
line_error = True
except:
err(line_no, "missing label, perhaps an empty line?")
line_error = True
# check features
prev_index = -1
for i in range(len(nodes)):
try:
(index, value) = nodes[i].split(':')
index = int(index)
value = my_float(value)
# precomputed kernel's index starts from 0 and LIBSVM
# checks it. Hence, don't treat index 0 as an error.
if index < 0:
err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i]))
line_error = True
elif index <= prev_index:
err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i]))
line_error = True
prev_index = index
except:
err(line_no, "feature '{0}' not an <index>:<value> pair, <index> integer, <value> real number ".format(nodes[i]))
line_error = True
line_no += 1
if line_error:
error_line_count += 1
if error_line_count > 0:
print("Found {0} lines with error.".format(error_line_count))
return 1
else:
print("No error.")
return 0
if __name__ == "__main__":
exit(main())
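# A small sketch of input lines this checker accepts, assuming a plain
# LIBSVM-format file (the values are illustrative):
#
#   +1 1:0.5 3:1.2 7:-0.3
#   2,3 1:1 2:0 4:0.7
#
# Labels may be a single number or a comma-separated multi-label list, and
# feature indices must be positive and strictly ascending.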
|
unlicense
| -314,977,299,926,581,440 | 2,530,087,451,464,291,000 | 21.953704 | 130 | 0.620815 | false |
ekeih/libtado
|
docs/source/conf.py
|
1
|
4718
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# libtado documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 16 17:11:41 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'libtado'
copyright = '2017, Max Rosin'
author = 'Max Rosin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'libtadodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'libtado.tex', 'libtado Documentation',
'Max Rosin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'libtado', 'libtado Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'libtado', 'libtado Documentation',
author, 'libtado', 'One line description of project.',
'Miscellaneous'),
]
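# A minimal sketch of building the documentation with this configuration;
# the working directory and output path are assumptions for illustration.
#
#   sphinx-build -b html source build/html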
|
gpl-3.0
| 8,797,049,572,915,555,000 | 1,444,000,500,623,753,500 | 29.050955 | 79 | 0.673379 | false |
jswope00/GAI
|
common/djangoapps/user_api/tests/test_models.py
|
52
|
1757
|
from django.db import IntegrityError
from django.test import TestCase
from student.tests.factories import UserFactory
from user_api.tests.factories import UserPreferenceFactory
from user_api.models import UserPreference
class UserPreferenceModelTest(TestCase):
def test_duplicate_user_key(self):
user = UserFactory.create()
UserPreferenceFactory.create(user=user, key="testkey", value="first")
self.assertRaises(
IntegrityError,
UserPreferenceFactory.create,
user=user,
key="testkey",
value="second"
)
def test_arbitrary_values(self):
user = UserFactory.create()
UserPreferenceFactory.create(user=user, key="testkey0", value="")
UserPreferenceFactory.create(user=user, key="testkey1", value="This is some English text!")
UserPreferenceFactory.create(user=user, key="testkey2", value="{'some': 'json'}")
UserPreferenceFactory.create(
user=user,
key="testkey3",
value="\xe8\xbf\x99\xe6\x98\xaf\xe4\xb8\xad\xe5\x9b\xbd\xe6\x96\x87\xe5\xad\x97'"
)
def test_get_set_preference(self):
# Checks that you can set a preference and get that preference later
# Also, tests that no preference is returned for keys that are not set
user = UserFactory.create()
key = 'testkey'
value = 'testvalue'
# does a round trip
UserPreference.set_preference(user, key, value)
pref = UserPreference.get_preference(user, key)
self.assertEqual(pref, value)
# get preference for key that doesn't exist for user
pref = UserPreference.get_preference(user, 'testkey_none')
self.assertIsNone(pref)
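# A minimal usage sketch of the API exercised above (the user object and key
# names are assumptions for illustration):
#
#   UserPreference.set_preference(user, 'pref-lang', 'en')
#   UserPreference.get_preference(user, 'pref-lang')    # -> 'en'
#   UserPreference.get_preference(user, 'missing-key')  # -> None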
|
agpl-3.0
| -8,619,998,081,051,190,000 | -6,762,787,304,334,091,000 | 36.382979 | 99 | 0.656801 | false |
er432/TASSELpy
|
TASSELpy/net/maizegenetics/trait/CombinePhenotype.py
|
1
|
4297
|
from TASSELpy.utils.helper import make_sig
from TASSELpy.utils.Overloading import javaOverload, javaConstructorOverload, javaStaticOverload
from TASSELpy.net.maizegenetics.trait.AbstractPhenotype import AbstractPhenotype
from TASSELpy.net.maizegenetics.trait.Phenotype import Phenotype
from TASSELpy.net.maizegenetics.trait.SimplePhenotype import SimplePhenotype
from TASSELpy.net.maizegenetics.trait import Trait
from TASSELpy.net.maizegenetics.taxa.Taxon import Taxon
from TASSELpy.java.lang.Boolean import metaBoolean
from TASSELpy.java.lang.Integer import metaInteger
from TASSELpy.java.lang.Double import metaDouble
from TASSELpy.javaObj import javaArray
java_imports = {'CombinePhenotype': 'net/maizegenetics/trait/CombinePhenotype',
'Phenotype':'net/maizegenetics/trait/Phenotype',
'SimplePhenotype':'net/maizegenetics/trait/SimplePhenotype',
'Taxon':'net/maizegenetics/taxa/Taxon',
'Trait':'net/maizegenetics/trait/Trait'}
class CombinePhenotype(AbstractPhenotype):
""" Class used for combining multiple phenotypes that may have different taxa
among them
"""
_java_name = java_imports['CombinePhenotype']
@javaConstructorOverload(java_imports['CombinePhenotype'])
def __init__(self, *args, **kwargs):
pass
@javaStaticOverload(java_imports['CombinePhenotype'],"getInstance",
(make_sig([java_imports['Phenotype'],java_imports['Phenotype'],'boolean'],
java_imports['CombinePhenotype']),
(Phenotype,Phenotype,metaBoolean),
lambda x: CombinePhenotype(obj=x)),
(make_sig([java_imports['Phenotype']+'[]','boolean'],
java_imports['CombinePhenotype']),
(javaArray.get_array_type(Phenotype),metaBoolean),
lambda x: CombinePhenotype(obj=x)))
def getInstance(*args):
""" Gets an instance of CombinePhenotype, allowing combining of more than
one phenotype
Signatures:
static CombinePhenotype getInstance(Phenotype phenotype1, Phenotype phenotype2,
boolean isUnion)
static CombinePhenotype getInstance(Phenotype[] phenotypes, boolean isUnion)
Arguments:
static CombinePhenotype getInstance(Phenotype phenotype1, Phenotype phenotype2,
boolean isUnion)
phenotype1 -- The first phenotype
phenotype2 -- The second phenotype
isUnion -- Whether to take the union of the taxa. If false, takes the intersection
static CombinePhenotype getInstance(Phenotype[] phenotypes, boolean isUnion)
phenotypes -- List of Phenotype objects to combine
isUnion -- Whether to take the union of the taxa in the Phenotypes. If false,
takes the intersection
"""
pass
@javaOverload("setData",
(make_sig([java_imports['Taxon'],java_imports['Trait'],'double'],'void'),
(Taxon,Trait,metaDouble),None),
(make_sig(['int','int','double'],'void'),
(metaInteger,metaInteger,metaDouble),None))
def setData(self, *args):
""" Sets a particular data value
Signatures:
void setData(Taxon taxon, Trait trait, double value)
void setData(int taxon, int trait, double value)
Arguments:
void setData(Taxon taxon, Trait trait, double value)
taxon -- A Taxon instance
trait -- A Trait instance
value -- The value to set
void setData(int taxon, int trait, double value)
taxon -- index of a taxon
trait -- index of a trait
value -- The value to set
"""
pass
@javaOverload("simpleCopy",
(make_sig([],java_imports['SimplePhenotype']),(),
lambda x: SimplePhenotype(obj=x)))
def simpleCopy(self, *args):
""" Makes a copy of the CombinePhenotype as a SimplePhenotype
Signatures:
SimplePhenotype simpleCopy()
Returns:
A SimplePhenotype copy of the object
"""
pass
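# A minimal usage sketch based on the signatures documented above; the
# phenotype objects are assumptions for illustration.
#
#   combined = CombinePhenotype.getInstance(pheno_a, pheno_b, True)  # union of taxa
#   simple_copy = combined.simpleCopy()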
|
bsd-3-clause
| -2,581,533,344,575,449,000 | -498,065,948,893,986,370 | 42.846939 | 98 | 0.628811 | false |
dario61081/koalixcrm
|
koalixcrm/accounting/rest/restinterface.py
|
2
|
2538
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from rest_framework import viewsets
from koalixcrm.accounting.models import Account, AccountingPeriod, Booking, ProductCategory
from koalixcrm.accounting.rest.account_rest import AccountJSONSerializer
from koalixcrm.accounting.rest.accounting_period_rest import AccountingPeriodJSONSerializer
from koalixcrm.accounting.rest.booking_rest import BookingJSONSerializer
from koalixcrm.accounting.rest.product_categorie_rest import ProductCategoryJSONSerializer
from koalixcrm.global_support_functions import ConditionalMethodDecorator
class AccountAsJSON(viewsets.ModelViewSet):
"""
API endpoint that allows accounts to be created, viewed and modified.
"""
queryset = Account.objects.all()
serializer_class = AccountJSONSerializer
filter_fields = ('account_type',)
@ConditionalMethodDecorator(method_decorator(login_required), settings.KOALIXCRM_REST_API_AUTH)
def dispatch(self, *args, **kwargs):
return super(AccountAsJSON, self).dispatch(*args, **kwargs)
class AccountingPeriodAsJSON(viewsets.ModelViewSet):
"""
API endpoint that allows accounting periods to be created, viewed and modified.
"""
queryset = AccountingPeriod.objects.all()
serializer_class = AccountingPeriodJSONSerializer
@ConditionalMethodDecorator(method_decorator(login_required), settings.KOALIXCRM_REST_API_AUTH)
def dispatch(self, *args, **kwargs):
return super(AccountingPeriodAsJSON, self).dispatch(*args, **kwargs)
class BookingAsJSON(viewsets.ModelViewSet):
"""
API endpoint that allows bookings to be created, viewed and modified.
"""
queryset = Booking.objects.all()
serializer_class = BookingJSONSerializer
@ConditionalMethodDecorator(method_decorator(login_required), settings.KOALIXCRM_REST_API_AUTH)
def dispatch(self, *args, **kwargs):
return super(BookingAsJSON, self).dispatch(*args, **kwargs)
class ProductCategoryAsJSON(viewsets.ModelViewSet):
"""
API endpoint that allows product categories to be created, viewed and modified.
"""
queryset = ProductCategory.objects.all()
serializer_class = ProductCategoryJSONSerializer
@ConditionalMethodDecorator(method_decorator(login_required), settings.KOALIXCRM_REST_API_AUTH)
def dispatch(self, *args, **kwargs):
return super(ProductCategoryAsJSON, self).dispatch(*args, **kwargs)
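# A minimal sketch of exposing these viewsets through a DRF router; the URL
# prefixes are assumptions for illustration.
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'accounts', AccountAsJSON)
#   router.register(r'bookings', BookingAsJSON)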
|
bsd-3-clause
| 5,115,219,438,698,928,000 | 7,453,382,103,192,972,000 | 40.606557 | 99 | 0.76911 | false |
jblackburne/scikit-learn
|
sklearn/gaussian_process/gpc.py
|
42
|
31571
|
"""Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class: string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values; may be binary or multi-class labels (multi-class
problems are handled via one-versus-rest or one-versus-one)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or none
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernel get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
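# A minimal usage sketch of the classifier defined above; the dataset and
# kernel choice are assumptions for illustration.
#
#   from sklearn.datasets import load_iris
#   iris = load_iris()
#   clf = GaussianProcessClassifier(kernel=1.0 * RBF(1.0))
#   clf.fit(iris.data, iris.target)          # one-vs-rest over the 3 classes
#   clf.predict_proba(iris.data[:2])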
|
bsd-3-clause
| 8,333,238,078,053,932,000 | -8,674,369,218,904,303,000 | 42.30727 | 79 | 0.598429 | false |
lidiamcfreitas/FenixScheduleMaker
|
ScheduleMaker/brython/www/src/Lib/encodings/cp1254.py
|
37
|
13809
|
""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1254',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
bsd-2-clause
| -7,070,844,793,366,025,000 | -4,766,234,721,344,595,000 | 42.980456 | 119 | 0.521834 | false |
chainer/chainer
|
chainer/utils/sparse.py
|
3
|
7680
|
import numpy
import chainer
from chainer import backend
from chainer import cuda
def _add_at(add_at, x, row, col, data):
assert data.size > 0
last_nz = data.size - (data != 0)[::-1].argmax()
add_at(x, (row[:last_nz], col[:last_nz]), data[:last_nz])
class CooMatrix(object):
"""A sparse matrix in COO format.
Args:
data (:ref:`ndarray`): The entries of the matrix.
            The entries are usually the non-zero elements of the matrix.
row (:ref:`ndarray`): The row indices of the matrix
entries.
col (:ref:`ndarray`): The column indices of the matrix
entries.
shape (tuple of int): The shape of the matrix in dense format.
        order ('C', 'F', 'other' or None): If ``'C'``, the matrix is assumed
            to have sorted row indices. If ``'F'``, the matrix is assumed to
            have sorted column indices. If ``'other'``, the matrix is assumed
            to be neither 'C' order nor 'F' order. If ``None`` (the default),
            the matrix is checked automatically to determine whether it is in
            'C' order, 'F' order or neither. This information is used by some
            functions such as :func:`~chainer.functions.sparse_matmul` as a
            hint to improve performance.
requires_grad (bool): If ``True``, gradient of this sparse matrix will
be computed in back-propagation.
.. seealso::
See :func:`~chainer.utils.to_coo` for how to construct a COO matrix
from an array.
"""
def __init__(self, data, row, col, shape, order=None,
requires_grad=False):
if not (1 <= data.ndim <= 2):
raise ValueError('ndim of data must be 1 or 2.')
if not (data.ndim == row.ndim == col.ndim):
raise ValueError('ndim of data, row and col must be the same.')
if len(shape) != 2:
raise ValueError('length of shape must be 2.')
if not (shape[0] > 0 and shape[1] > 0):
raise ValueError('numbers in shape must be greater than 0.')
if order not in ('C', 'F', 'other', None):
raise ValueError('order must be \'C\', \'F\', \'other\' or None.')
self.data = chainer.Variable(data, requires_grad=requires_grad)
self.row = row
self.col = col
self.shape = shape # (row, col)
self.order = order
if order is None:
self.order = get_order(row, col)
def to_dense(self):
"""Returns a dense matrix format of this sparse matrix."""
data = self.data
if data.ndim == 1:
shape = self.shape
elif data.ndim == 2:
shape = (data.shape[0], *self.shape)
else:
assert False
xp = data.xp
x = xp.zeros(shape, dtype=data.dtype)
if data.size > 0:
row = self.row
col = self.col
if xp is numpy:
add_at = numpy.add.at
elif xp is cuda.cupy:
add_at = cuda.cupyx.scatter_add
data = data.array
if data.ndim == 1:
_add_at(add_at, x, row, col, data)
elif data.ndim == 2:
for i in range(data.shape[0]):
_add_at(add_at, x[i], row[i], col[i], data[i])
else:
assert False
return x
def to_coo(x, ldnz=None, requires_grad=False):
"""Returns a single or a batch of matrices in COO format.
Args:
x (:ref:`ndarray`): Input dense matrix. The ndim of
``x`` must be two or three. If ndim is two, it is treated as
a single matrix. If three, it is treated as batched matrices.
        ldnz (int): Size of the arrays to be created for the data, row indices
            and column indices. The actual size becomes max(nnz, ldnz), where
            nnz is the number of non-zero elements in the input dense matrix.
requires_grad (bool): If ``True``, gradient of sparse matrix will be
computed in back-propagation.
Returns:
~chainer.utils.CooMatrix: A sparse matrix or batched sparse matrices
in COO format of a given dense matrix or batched dense matrices.
.. admonition:: Example
Create a :class:`~chainer.utils.CooMatrix` from an array with 2
non-zero elements and 4 zeros and access its attributes. No batch
dimension is involved.
.. doctest::
>>> data = np.array([[0, 2, 0], [-1, 0, 0]], np.float32)
>>> x = chainer.utils.to_coo(data)
>>> x.data
variable([ 2., -1.])
>>> x.row
array([0, 1], dtype=int32)
>>> x.col
array([1, 0], dtype=int32)
>>> x.shape
(2, 3)
"""
xp = backend.get_array_module(x)
if x.ndim == 2:
_row, _col = xp.where(x != 0)
nnz = len(_row)
if ldnz is None or ldnz < nnz:
ldnz = nnz
data = xp.zeros((ldnz), dtype=x.dtype)
row = xp.full((ldnz), -1, dtype=xp.int32)
col = xp.full((ldnz), -1, dtype=xp.int32)
data[:nnz] = x[_row, _col]
row[:nnz] = xp.array(_row).astype(xp.int32)
col[:nnz] = xp.array(_col).astype(xp.int32)
shape = x.shape
return CooMatrix(data, row, col, shape,
requires_grad=requires_grad)
elif x.ndim == 3:
# first axis is batch axis
nb = x.shape[0]
if ldnz is None:
ldnz = 0
for i in range(nb):
ldnz = max(ldnz, len(xp.where(x[i] != 0)[0]))
data = xp.empty((nb, ldnz), dtype=x.dtype)
row = xp.empty((nb, ldnz), dtype=xp.int32)
col = xp.empty((nb, ldnz), dtype=xp.int32)
for i in range(nb):
coo = to_coo(x[i], ldnz)
data[i] = coo.data.data
row[i] = coo.row
col[i] = coo.col
shape = x.shape[1:]
return CooMatrix(data, row, col, shape,
requires_grad=requires_grad)
else:
raise ValueError('ndim of x must be 2 or 3.')
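# Illustrative note (not from the original source): with a 3-dimensional input the first
# axis is treated as the batch axis. Reusing ``data`` from the docstring example above:
#     >>> xs = np.stack([data, data])                # shape (2, 2, 3)
#     >>> batched = chainer.utils.to_coo(xs)
#     >>> batched.data.shape, batched.row.shape
#     ((2, 2), (2, 2))
#     >>> batched.shape                              # shape of each dense matrix
#     (2, 3)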
def get_order(row, col):
"""Check if a coo matrix with given row and col is C or F order.
Args:
row (:ref:`ndarray`): The row indices of the matrix
entries.
col (:ref:`ndarray`): The column indices of the matrix
entries.
Returns:
Returns ``'C'`` when a coo matrix with given row and column indices is
C order, in other words, the row indices are sorted. Returns ``'F'``
when it is F order, in other words, the column indices are sorted.
Returns ``'other'`` otherwise.
"""
if _is_c_order(row, col):
return 'C'
if _is_c_order(col, row):
return 'F'
return 'other'
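# For illustration (not from the original source): row=[0, 0, 1], col=[0, 2, 1] has sorted
# row indices, so get_order returns 'C'; row=[1, 0, 1], col=[0, 1, 1] has sorted column
# indices, so it returns 'F'; when neither index array is sorted the result is 'other'.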
def _is_c_order(row, col):
"""Check if a coo matrix with given row and col is c_order"""
if row.shape != col.shape:
raise ValueError('shape of row and col must be the same.')
if row.ndim != 1:
for i in range(row.shape[0]):
if not _is_c_order(row[i], col[i]):
return False
return True
xp = backend.get_array_module(row)
_row = row[col >= 0]
_col = col[row >= 0]
    if _row[_row < 0].size > 0 or _col[_col < 0].size > 0:
raise ValueError('invalid index combination of row and col.')
if _row.shape[0] <= 1:
return True
row_diff = xp.zeros(_row.shape, dtype=_row.dtype)
row_diff[1:] = _row[1:] - _row[:-1]
if xp.amin(row_diff) < 0:
return False
col_diff = xp.zeros(_col.shape, dtype=_col.dtype)
col_diff[1:] = _col[1:] - _col[:-1]
col_diff[(row_diff > 0)] = 0
return xp.amin(col_diff) >= 0
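# A minimal, runnable usage sketch (illustrative, not part of the original module); it
# relies only on the public helpers defined above.
if __name__ == '__main__':
    dense = numpy.array([[0, 2, 0], [-1, 0, 0]], numpy.float32)
    coo = to_coo(dense)
    print(coo.order)                      # 'C': the extracted row indices are sorted
    assert (coo.to_dense() == dense).all()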
|
mit
| -3,126,197,173,803,247,600 | 2,816,019,392,391,918,600 | 34.88785 | 79 | 0.542057 | false |
sudheesh001/oh-mainline
|
vendor/packages/scrapy/scrapy/contrib/httpcache.py
|
16
|
2156
|
from __future__ import with_statement
import os
from time import time
import cPickle as pickle
from scrapy.http import Headers
from scrapy.responsetypes import responsetypes
from scrapy.utils.request import request_fingerprint
from scrapy.utils.project import data_path
from scrapy import conf
class DbmCacheStorage(object):
def __init__(self, settings=conf.settings):
self.cachedir = data_path(settings['HTTPCACHE_DIR'])
self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
self.dbmodule = __import__(settings['HTTPCACHE_DBM_MODULE'])
self.dbs = {}
def open_spider(self, spider):
dbpath = os.path.join(self.cachedir, '%s.db' % spider.name)
self.dbs[spider] = self.dbmodule.open(dbpath, 'c')
def close_spider(self, spider):
self.dbs[spider].close()
def retrieve_response(self, spider, request):
data = self._read_data(spider, request)
if data is None:
return # not cached
url = data['url']
status = data['status']
headers = Headers(data['headers'])
body = data['body']
respcls = responsetypes.from_args(headers=headers, url=url)
response = respcls(url=url, headers=headers, status=status, body=body)
return response
def store_response(self, spider, request, response):
key = self._request_key(request)
data = {
'status': response.status,
'url': response.url,
'headers': dict(response.headers),
'body': response.body,
}
self.dbs[spider]['%s_data' % key] = pickle.dumps(data, protocol=2)
self.dbs[spider]['%s_time' % key] = str(time())
def _read_data(self, spider, request):
key = self._request_key(request)
db = self.dbs[spider]
tkey = '%s_time' % key
if not db.has_key(tkey):
return # not found
ts = db[tkey]
if 0 < self.expiration_secs < time() - float(ts):
return # expired
return pickle.loads(db['%s_data' % key])
def _request_key(self, request):
return request_fingerprint(request)
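# Illustrative note (not part of the original file): each cached response is kept under two
# DBM entries keyed by the request fingerprint, e.g.
#   '<fingerprint>_data' -> pickle.dumps({'status': ..., 'url': ..., 'headers': ..., 'body': ...})
#   '<fingerprint>_time' -> str(time()), compared against HTTPCACHE_EXPIRATION_SECS on retrieval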
|
agpl-3.0
| 7,492,197,297,951,696,000 | 5,341,334,180,839,947,000 | 32.6875 | 78 | 0.615492 | false |
tdtrask/ansible
|
test/units/modules/network/onyx/test_onyx_vlan.py
|
15
|
3728
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.onyx import onyx_vlan
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxVlanModule(TestOnyxModule):
module = onyx_vlan
def setUp(self):
super(TestOnyxVlanModule, self).setUp()
self.mock_get_config = patch.object(
onyx_vlan.OnyxVlanModule, "_get_vlan_config")
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestOnyxVlanModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_vlan_show.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_vlan_no_change(self):
set_module_args(dict(vlan_id=20))
self.execute_module(changed=False)
def test_vlan_remove_name(self):
set_module_args(dict(vlan_id=10, name=''))
commands = ['vlan 10 no name']
self.execute_module(changed=True, commands=commands)
def test_vlan_change_name(self):
set_module_args(dict(vlan_id=10, name='test-test'))
commands = ['vlan 10 name test-test']
self.execute_module(changed=True, commands=commands)
def test_vlan_create(self):
set_module_args(dict(vlan_id=30))
commands = ['vlan 30', 'exit']
self.execute_module(changed=True, commands=commands)
def test_vlan_create_with_name(self):
set_module_args(dict(vlan_id=30, name='test-test'))
commands = ['vlan 30', 'exit', 'vlan 30 name test-test']
self.execute_module(changed=True, commands=commands)
def test_vlan_remove(self):
set_module_args(dict(vlan_id=20, state='absent'))
commands = ['no vlan 20']
self.execute_module(changed=True, commands=commands)
def test_vlan_remove_not_exist(self):
set_module_args(dict(vlan_id=30, state='absent'))
self.execute_module(changed=False)
def test_vlan_aggregate(self):
aggregate = list()
aggregate.append(dict(vlan_id=30))
aggregate.append(dict(vlan_id=20))
set_module_args(dict(aggregate=aggregate))
commands = ['vlan 30', 'exit']
self.execute_module(changed=True, commands=commands)
def test_vlan_aggregate_purge(self):
aggregate = list()
aggregate.append(dict(vlan_id=30))
aggregate.append(dict(vlan_id=20))
set_module_args(dict(aggregate=aggregate, purge=True))
commands = ['vlan 30', 'exit', 'no vlan 10', 'no vlan 1']
self.execute_module(changed=True, commands=commands)
|
gpl-3.0
| 7,829,551,864,647,742,000 | -7,060,536,640,379,650,000 | 35.54902 | 70 | 0.671137 | false |
wunderlins/learning
|
python/zodb/lib/osx/ZODB/MappingStorage.py
|
2
|
11501
|
##############################################################################
#
# Copyright (c) Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""A simple in-memory mapping-based ZODB storage
This storage provides an example implementation of a fairly full
storage without distracting storage details.
"""
import BTrees
import time
import threading
import ZODB.BaseStorage
import ZODB.interfaces
import ZODB.POSException
import ZODB.TimeStamp
import ZODB.utils
import zope.interface
@zope.interface.implementer(
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
class MappingStorage(object):
def __init__(self, name='MappingStorage'):
self.__name__ = name
self._data = {} # {oid->{tid->pickle}}
self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord}
self._ltid = ZODB.utils.z64
self._last_pack = None
_lock = threading.RLock()
self._lock_acquire = _lock.acquire
self._lock_release = _lock.release
self._commit_lock = threading.Lock()
self._opened = True
self._transaction = None
self._oid = 0
######################################################################
# Preconditions:
def opened(self):
"""The storage is open
"""
return self._opened
def not_in_transaction(self):
"""The storage is not committing a transaction
"""
return self._transaction is None
#
######################################################################
# testing framework (lame)
def cleanup(self):
pass
# ZODB.interfaces.IStorage
@ZODB.utils.locked
def close(self):
self._opened = False
# ZODB.interfaces.IStorage
def getName(self):
return self.__name__
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def getSize(self):
size = 0
for oid, tid_data in self._data.items():
size += 50
for tid, pickle in tid_data.items():
size += 100+len(pickle)
return size
# ZEO.interfaces.IServeable
@ZODB.utils.locked(opened)
def getTid(self, oid):
tid_data = self._data.get(oid)
if tid_data:
return tid_data.maxKey()
raise ZODB.POSException.POSKeyError(oid)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def history(self, oid, size=1):
tid_data = self._data.get(oid)
if not tid_data:
raise ZODB.POSException.POSKeyError(oid)
tids = tid_data.keys()[-size:]
tids.reverse()
return [
dict(
time = ZODB.TimeStamp.TimeStamp(tid),
tid = tid,
serial = tid,
user_name = self._transactions[tid].user,
description = self._transactions[tid].description,
extension = self._transactions[tid].extension,
size = len(tid_data[tid])
)
for tid in tids]
# ZODB.interfaces.IStorage
def isReadOnly(self):
return False
# ZODB.interfaces.IStorageIteration
def iterator(self, start=None, end=None):
for transaction_record in self._transactions.values(start, end):
yield transaction_record
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def lastTransaction(self):
return self._ltid
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def __len__(self):
return len(self._data)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def load(self, oid, version=''):
assert not version, "Versions are not supported"
tid_data = self._data.get(oid)
if tid_data:
tid = tid_data.maxKey()
return tid_data[tid], tid
raise ZODB.POSException.POSKeyError(oid)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def loadBefore(self, oid, tid):
tid_data = self._data.get(oid)
if tid_data:
before = ZODB.utils.u64(tid)
if not before:
return None
before = ZODB.utils.p64(before-1)
tids_before = tid_data.keys(None, before)
if tids_before:
tids_after = tid_data.keys(tid, None)
tid = tids_before[-1]
return (tid_data[tid], tid,
(tids_after and tids_after[0] or None)
)
else:
raise ZODB.POSException.POSKeyError(oid)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def loadSerial(self, oid, serial):
tid_data = self._data.get(oid)
if tid_data:
try:
return tid_data[serial]
except KeyError:
pass
raise ZODB.POSException.POSKeyError(oid, serial)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def new_oid(self):
self._oid += 1
return ZODB.utils.p64(self._oid)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def pack(self, t, referencesf, gc=True):
if not self._data:
return
stop = ZODB.TimeStamp.TimeStamp(*time.gmtime(t)[:5]+(t%60,)).raw()
if self._last_pack is not None and self._last_pack >= stop:
if self._last_pack == stop:
return
raise ValueError("Already packed to a later time")
self._last_pack = stop
transactions = self._transactions
# Step 1, remove old non-current records
for oid, tid_data in self._data.items():
tids_to_remove = tid_data.keys(None, stop)
if tids_to_remove:
tids_to_remove.pop() # Keep the last, if any
if tids_to_remove:
for tid in tids_to_remove:
del tid_data[tid]
if transactions[tid].pack(oid):
del transactions[tid]
if gc:
# Step 2, GC. A simple sweep+copy
new_data = BTrees.OOBTree.OOBTree()
to_copy = set([ZODB.utils.z64])
while to_copy:
oid = to_copy.pop()
tid_data = self._data.pop(oid)
new_data[oid] = tid_data
for pickle in tid_data.values():
for oid in referencesf(pickle):
if oid in new_data:
continue
to_copy.add(oid)
# Remove left over data from transactions
for oid, tid_data in self._data.items():
for tid in tid_data:
if transactions[tid].pack(oid):
del transactions[tid]
self._data.clear()
self._data.update(new_data)
# ZODB.interfaces.IStorage
def registerDB(self, db):
pass
# ZODB.interfaces.IStorage
def sortKey(self):
return self.__name__
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def store(self, oid, serial, data, version, transaction):
assert not version, "Versions are not supported"
if transaction is not self._transaction:
raise ZODB.POSException.StorageTransactionError(self, transaction)
old_tid = None
tid_data = self._data.get(oid)
if tid_data:
old_tid = tid_data.maxKey()
if serial != old_tid:
raise ZODB.POSException.ConflictError(
oid=oid, serials=(old_tid, serial), data=data)
self._tdata[oid] = data
return self._tid
checkCurrentSerialInTransaction = (
ZODB.BaseStorage.checkCurrentSerialInTransaction)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def tpc_abort(self, transaction):
if transaction is not self._transaction:
return
self._transaction = None
self._commit_lock.release()
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def tpc_begin(self, transaction, tid=None):
# The tid argument exists to support testing.
if transaction is self._transaction:
raise ZODB.POSException.StorageTransactionError(
"Duplicate tpc_begin calls for same transaction")
self._lock_release()
self._commit_lock.acquire()
self._lock_acquire()
self._transaction = transaction
self._tdata = {}
if tid is None:
if self._transactions:
old_tid = self._transactions.maxKey()
else:
old_tid = None
tid = ZODB.utils.newTid(old_tid)
self._tid = tid
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def tpc_finish(self, transaction, func = lambda tid: None):
if (transaction is not self._transaction):
raise ZODB.POSException.StorageTransactionError(
"tpc_finish called with wrong transaction")
tid = self._tid
func(tid)
tdata = self._tdata
for oid in tdata:
tid_data = self._data.get(oid)
if tid_data is None:
tid_data = BTrees.OOBTree.OOBucket()
self._data[oid] = tid_data
tid_data[tid] = tdata[oid]
self._ltid = tid
self._transactions[tid] = TransactionRecord(tid, transaction, tdata)
self._transaction = None
del self._tdata
self._commit_lock.release()
# ZEO.interfaces.IServeable
@ZODB.utils.locked(opened)
def tpc_transaction(self):
return self._transaction
# ZODB.interfaces.IStorage
def tpc_vote(self, transaction):
if transaction is not self._transaction:
raise ZODB.POSException.StorageTransactionError(
"tpc_vote called with wrong transaction")
class TransactionRecord:
status = ' '
def __init__(self, tid, transaction, data):
self.tid = tid
self.user = transaction.user
self.description = transaction.description
extension = transaction._extension
self.extension = extension
self.data = data
    # ``_extension`` is kept as an alias for the ``extension`` attribute set in __init__;
    # delegating to ``self._extension`` here would recurse into this property.
    _extension = property(lambda self: self.extension,
                          lambda self, v: setattr(self, 'extension', v),
                          )
def __iter__(self):
for oid, data in self.data.items():
yield DataRecord(oid, self.tid, data)
def pack(self, oid):
self.status = 'p'
del self.data[oid]
return not self.data
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
"""Abstract base class for iterator protocol"""
version = ''
data_txn = None
def __init__(self, oid, tid, data):
self.oid = oid
self.tid = tid
self.data = data
def DB(*args, **kw):
return ZODB.DB(MappingStorage(), *args, **kw)
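# A minimal usage sketch (illustrative, not part of the original module). It assumes only
# the public ZODB API plus the companion ``transaction`` package that ZODB depends on.
if __name__ == '__main__':
    import transaction
    db = DB()                            # a ZODB.DB wrapped around a MappingStorage
    conn = db.open()
    conn.root()['answer'] = 42
    transaction.commit()                 # drives tpc_begin/tpc_vote/tpc_finish above
    assert conn.root()['answer'] == 42
    conn.close()
    db.close()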
|
gpl-2.0
| -5,683,113,328,315,049,000 | 2,745,027,205,229,272,600 | 30.252717 | 80 | 0.560821 | false |
muhleder/timestrap
|
timestrap/settings/base.py
|
1
|
3215
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
)
)
# SECURITY WARNING: set this to your domain name in production!
ALLOWED_HOSTS = ['*']
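# For example (illustrative, hypothetical domain):
# ALLOWED_HOSTS = ['timestrap.example.com']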
# Application definition
INSTALLED_APPS = [
'conf',
'core',
'api',
'widget_tweaks',
'django_filters',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'conf.middleware.site.SiteMiddleware',
'conf.middleware.i18n.I18NMiddleware',
]
ROOT_URLCONF = 'timestrap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'timestrap/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'timestrap.wsgi.application'
# Authentication
# https://docs.djangoproject.com/en/1.11/topics/auth/default/
AUTHENTICATION_BACKENDS = [
'conf.backends.auth.SitePermissionBackend',
]
LOGIN_REDIRECT_URL = '/timesheet/'
LOGIN_URL = '/login/'
LOGOUT_REDIRECT_URL = '/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Email settings
# https://docs.djangoproject.com/en/1.11/topics/email/
EMAIL_BACKEND = 'conf.backends.mail.EmailBackend'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'timestrap/static'),
]
# Rest framework
# http://www.django-rest-framework.org/
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'api.permissions.TimestrapDjangoModelPermissions'
],
'PAGE_SIZE': 25,
'DEFAULT_FILTER_BACKENDS': [
'django_filters.rest_framework.DjangoFilterBackend',
],
'UNICODE_JSON': False
}
|
bsd-2-clause
| -5,234,159,946,473,432,000 | 8,689,475,549,441,594,000 | 21.326389 | 79 | 0.677138 | false |
nimbusproject/epumgmt
|
src/python/tests/test_epumgmt_defaults_log_events.py
|
1
|
16586
|
import os
import shutil
import tempfile
import ConfigParser
import epumgmt.defaults.log_events
from epumgmt.defaults import DefaultParameters
from mocks.common import FakeCommon
class TestAmqpEvents:
def setup(self):
self.runlogdir = tempfile.mkdtemp()
self.vmlogdir = tempfile.mkdtemp()
producer_dir = os.path.join(self.runlogdir, "producer1-container")
os.mkdir(producer_dir)
self.producer_ioncontainer_log = os.path.join(producer_dir, "ioncontainer.log")
with open(self.producer_ioncontainer_log, "w") as container_file:
container_file.write("contents!")
consumer_dir = os.path.join(self.runlogdir, "epuworker_container")
os.mkdir(consumer_dir)
self.consumer_ioncontainer_log = os.path.join(consumer_dir, "ioncontainer.log")
with open(self.consumer_ioncontainer_log, "w") as container_file:
container_file.write("contents!")
self.config = ConfigParser.RawConfigParser()
self.config.add_section("events")
self.config.set("events", "runlogdir", self.runlogdir)
self.config.set("events", "vmlogdir", self.vmlogdir)
self.c = FakeCommon()
self.p = DefaultParameters(self.config, None)
self.amqp_events = epumgmt.defaults.log_events.AmqpEvents(self.p, self.c, None, "")
def teardown(self):
shutil.rmtree(self.runlogdir)
shutil.rmtree(self.vmlogdir)
def test_create_datetime(self):
year = 2011
month = 4
day = 5
hour = 4
minute = 3
second = 7
microsecond = 6
timestamp = { "year": year, "month": month, "day": day,
"hour": hour, "minute": minute, "second": second,
"microsecond": microsecond }
got_datetime = self.amqp_events._create_datetime(timestamp)
print dir(got_datetime)
assert got_datetime.year == year
assert got_datetime.minute == minute
assert got_datetime.day == day
def test_set_workproducerlog_filenames(self):
self.amqp_events._set_workproducerlog_filenames()
assert self.producer_ioncontainer_log in self.amqp_events.workproducerlog_filenames
def test_set_workconsumerlog_filenames(self):
self.amqp_events._set_workconsumerlog_filenames()
assert self.consumer_ioncontainer_log in self.amqp_events.workconsumerlog_filenames
def test_update_log_filenames(self):
self.amqp_events._update_log_filenames()
assert self.consumer_ioncontainer_log in self.amqp_events.workconsumerlog_filenames
assert self.producer_ioncontainer_log in self.amqp_events.workproducerlog_filenames
def test_get_event_datetimes_dict(self):
got_datetimes = self.amqp_events.get_event_datetimes_dict("fake event")
assert got_datetimes == {}
job_begin_id = 545454
job_begin_event = '2011-07-07 11:03:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_begin", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_begin_id
job_end_id = 424242
job_end_event = '2011-07-07 11:04:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_end", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_end_id
with open(self.consumer_ioncontainer_log, "w") as container:
container.write(job_begin_event + job_end_event)
job_sent_id = 424244
job_sent_event = '2011-07-07 11:04:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_sent", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_sent_id
with open(self.producer_ioncontainer_log, "w") as container:
container.write(job_sent_event)
got_datetimes = self.amqp_events.get_event_datetimes_dict("job_end")
assert got_datetimes.has_key(job_end_id)
got_datetimes = self.amqp_events.get_event_datetimes_dict("job_begin")
assert got_datetimes.has_key(job_begin_id)
got_datetimes = self.amqp_events.get_event_datetimes_dict("job_sent")
assert got_datetimes.has_key(job_sent_id)
def test_get_event_datetimes_dict_badfile(self):
job_sent_id = 424244
job_sent_event = '2011-07-07 11:04:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_sent", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_sent_id
with open(self.producer_ioncontainer_log, "w") as container:
container.write(job_sent_event)
old_mode = os.stat(self.producer_ioncontainer_log).st_mode
os.chmod(self.producer_ioncontainer_log, 0)
got_datetimes = self.amqp_events.get_event_datetimes_dict("job_sent")
failed_to_open = [message for (level, message)
in self.c.log.transcript
if level == "ERROR"
and "Failed to open and read" in message]
assert len(failed_to_open) == 1
os.chmod(self.producer_ioncontainer_log, old_mode)
def test_get_event_count(self):
job_sent_id = 424244
job_sent_event = '2011-07-07 11:04:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_sent", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_sent_id
with open(self.producer_ioncontainer_log, "w") as container:
container.write(job_sent_event)
count = self.amqp_events.get_event_count("job_sent")
assert count == 1
class TestControllerEvents:
def setup(self):
self.vardir = tempfile.mkdtemp()
self.runlogdir = "runlogs"
self.vmlogdir = "vmlogs"
self.test_event = "testevent 4-5-6 12:12:12.12"
controller_dir = os.path.join(self.vardir, self.runlogdir, "epucontrollerkill_logs")
os.makedirs(controller_dir)
self.controller_ioncontainer_log = os.path.join(controller_dir, "ioncontainer.log")
with open(self.controller_ioncontainer_log, "w") as container_file:
container_file.write(self.test_event)
self.config = ConfigParser.RawConfigParser()
self.config.add_section("events")
self.config.set("events", "runlogdir", self.runlogdir)
self.config.add_section("ecdirs")
self.config.set("ecdirs", "var", self.vardir)
self.p = DefaultParameters(self.config, None)
self.c = FakeCommon(self.p)
self.controller_events = epumgmt.defaults.log_events.ControllerEvents(self.p, self.c, None, "")
def teardown(self):
shutil.rmtree(self.vardir)
def test_set_controllerlog_filenames(self):
self.controller_events._set_controllerlog_filenames()
assert self.controller_ioncontainer_log in self.controller_events.controllerlog_filenames
def test_update_log_filenames(self):
self.controller_events._update_log_filenames()
assert self.controller_ioncontainer_log in self.controller_events.controllerlog_filenames
def test_create_datetime(self):
month = 8
day = 9
year = 1985
hour = 1
minute = 42
second = 43
microsecond = 44
date_string = "%s - %s - %s" % (year, month, day)
time_string = "%s:%s:%s.%s" % (hour, minute, second, microsecond)
datetime = self.controller_events._create_datetime(date_string, time_string)
assert datetime.month == month
def test_get_event_datetimes_list(self):
event = "testevent"
event_list = self.controller_events.get_event_datetimes_list(event)
assert len(event_list) == 1
class TestTorqueEvents:
def setup(self):
self.vardir = tempfile.mkdtemp()
self.runlogdir = "runlogs"
self.vmlogdir = "vmlogs"
self.job_id = 5
self.torque_event = "05/25/2011 15:57:42;0008;PBS_Server;Job;%s.ip-10-203-66-146.ec2.internal;Job Queued at request of [email protected], owner = [email protected], job name = tmp5TEZaU, queue = default" % self.job_id
torque_dir = os.path.join(self.vardir, self.runlogdir, "torque-server_logs")
os.makedirs(torque_dir)
self.torque_log = os.path.join(torque_dir, "torque.log")
with open(self.torque_log, "w") as torque_file:
torque_file.write(self.torque_event)
self.config = ConfigParser.RawConfigParser()
self.config.add_section("events")
self.config.set("events", "runlogdir", self.runlogdir)
self.config.add_section("ecdirs")
self.config.set("ecdirs", "var", self.vardir)
self.p = DefaultParameters(self.config, None)
self.c = FakeCommon(self.p)
self.torque_events = epumgmt.defaults.log_events.TorqueEvents(self.p, self.c, None, "")
def test_set_serverlog_filenames(self):
self.torque_events._set_serverlog_filenames()
assert self.torque_log in self.torque_events.serverlog_filenames
def test_update_log_filenames(self):
self.torque_events._update_log_filenames()
assert self.torque_log in self.torque_events.serverlog_filenames
def test_create_datetime(self):
year = 2011
month = 4
day = 5
hour = 4
minute = 3
second = 7
microsecond = 6
date = "%s/%s/%s" % (month, day, year)
time = "%s:%s:%s" % (hour, minute, second)
got_datetime = self.torque_events._create_datetime(date, time)
print dir(got_datetime)
assert got_datetime.year == year
assert got_datetime.minute == minute
assert got_datetime.day == day
assert got_datetime.hour - epumgmt.defaults.log_events.UTC_OFFSET == hour
def test_get_event_datetimes_dict(self):
# Test behaviour with bad event type
event = "non-existent"
event_times = self.torque_events.get_event_datetimes_dict(event)
assert event_times == {}
# Test correct parsing behaviour
event = "job_sent"
event_times = self.torque_events.get_event_datetimes_dict(event)
assert event_times.has_key(self.job_id)
# Test handling of non-readable file
self.c.log.transcript = []
os.chmod(self.torque_log, 0)
event = "job_sent"
event_times = self.torque_events.get_event_datetimes_dict(event)
errors = [message for (level, message)
in self.c.log.transcript
if level == "ERROR"]
print errors
assert "Failed to open and read from file" in errors[0]
class TestNodeEvents:
def setup(self):
self.vardir = tempfile.mkdtemp()
self.runlogdir = "runlogs"
self.logfiledir = "logs"
self.run_name = "test-run"
self.launch_ctx_id = "imauuidhonest"
self.launch_ctx_done = "2011-06-14 09:33:08,268 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {\"eventname\": \"launch_ctx_done\", \"timestamp\": {\"hour\": 16, \"month\": 6, \"second\": 8, \"microsecond\": 268628, \"year\": 2011, \"day\": 14, \"minute\": 33}, \"uniquekey\": \"8311960b-2802-4976-ae4d-1c4e7e7b9ee5\", \"eventsource\": \"provisioner\", \"extra\": {\"launch_id\": \"e62df223-0d7d-4882-8583-98de1c14f5c8\", \"node_ids\": [\"%s\"]}}" % self.launch_ctx_id
self.vmkill_event_id = "arealid"
self.vmkill_event = "2011-06-14 09:33:08,268 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {\"eventname\": \"fetch_killed\", \"timestamp\": {\"hour\": 16, \"month\": 6, \"second\": 8, \"microsecond\": 268628, \"year\": 2011, \"day\": 14, \"minute\": 33}, \"uniquekey\": \"8311960b-2802-4976-ae4d-1c4e7e7b9ee5\", \"eventsource\": \"provisioner\", \"extra\": {\"launch_id\": \"e62df223-0d7d-4882-8583-98de1c14f5c8\", \"iaas_id\": \"%s\"}}" % self.vmkill_event_id
provisioner_dir = os.path.join(self.vardir, self.runlogdir, self.run_name, "provisioner")
os.makedirs(provisioner_dir)
vmkill_dir = os.path.join(self.vardir, self.logfiledir)
os.makedirs(vmkill_dir)
self.provisioner_log = os.path.join(provisioner_dir, "ioncontainer.log")
with open(self.provisioner_log, "w") as provisioner_file:
provisioner_file.write(self.launch_ctx_done)
self.vmkill_log = os.path.join(vmkill_dir, "--%s-fetchkill-" % self.run_name)
with open(self.vmkill_log, "w") as vmkill_file:
vmkill_file.write(self.vmkill_event)
self.config = ConfigParser.RawConfigParser()
self.config.add_section("events")
self.config.set("events", "runlogdir", self.runlogdir)
self.config.add_section("logging")
self.config.set("logging", "logfiledir", self.logfiledir)
self.config.add_section("ecdirs")
self.config.set("ecdirs", "var", self.vardir)
self.p = DefaultParameters(self.config, None)
self.c = FakeCommon(self.p)
self.node_events = epumgmt.defaults.log_events.NodeEvents(self.p, self.c, None, self.run_name)
def teardown(self):
shutil.rmtree(self.vardir)
def test_create_datetime(self):
year = 2011
month = 4
day = 5
hour = 4
minute = 3
second = 7
microsecond = 6
timestamp = { "year": year, "month": month, "day": day,
"hour": hour, "minute": minute, "second": second,
"microsecond": microsecond }
got_datetime = self.node_events._create_datetime(timestamp)
print dir(got_datetime)
assert got_datetime.year == year
assert got_datetime.minute == minute
assert got_datetime.day == day
def test_set_provisionerlog_filenames(self):
self.node_events._set_provisionerlog_filenames()
assert self.provisioner_log in self.node_events.provisionerlog_filenames
def test_set_vmkilllog_filenames(self):
self.node_events._set_vmkilllog_filenames()
assert self.vmkill_log in self.node_events.vmkilllog_filenames
def test_update_log_filenames(self):
self.node_events._update_log_filenames()
assert self.vmkill_log in self.node_events.vmkilllog_filenames
assert self.provisioner_log in self.node_events.provisionerlog_filenames
def test_get_event_datetimes_dict(self):
event = "fake-event"
event_times = self.node_events.get_event_datetimes_dict(event)
assert event_times == {}
event = "launch_ctx_done"
event_times = self.node_events.get_event_datetimes_dict(event)
print event_times
assert event_times.has_key(self.launch_ctx_id)
event = "fetch_killed"
event_times = self.node_events.get_event_datetimes_dict(event)
print event_times
assert event_times.has_key(self.vmkill_event_id)
# test when we have an unreadable file
self.c.log.transcript = []
os.chmod(self.provisioner_log, 0)
event = "launch_ctx_done"
event_times = self.node_events.get_event_datetimes_dict(event)
print event_times
failed_to_open = [message for (level, message)
in self.c.log.transcript
if level == "ERROR"
and "Failed to open and read" in message]
assert len(failed_to_open) == 1
def test_get_event_count(self):
event_count = self.node_events.get_event_count("launch_ctx_done")
print event_count
assert event_count == 1
|
apache-2.0
| 3,722,686,426,051,794,400 | 2,756,303,219,392,400,000 | 39.552567 | 476 | 0.629447 | false |
maljac/odoo-addons
|
partner_views_fields/res_config.py
|
1
|
1422
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models
class partner_configuration(models.TransientModel):
_inherit = 'base.config.settings'
group_ref = fields.Boolean(
"Show Reference On Partners Tree View",
implied_group='partner_views_fields.group_ref',
)
group_user_id = fields.Boolean(
"Show Commercial On Partners Tree View",
implied_group='partner_views_fields.group_user_id',
)
group_city = fields.Boolean(
"Show City On Partners Tree and Search Views",
implied_group='partner_views_fields.group_city',
)
group_state_id = fields.Boolean(
"Show State On Partners Tree and Search Views",
implied_group='partner_views_fields.group_state_id',
)
group_country_id = fields.Boolean(
"Show Country On Partners Tree and Search Views",
implied_group='partner_views_fields.group_country_id',
)
group_function = fields.Boolean(
"Show Function On Partners Tree and Search Views",
implied_group='partner_views_fields.group_function',
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 4,537,524,023,310,980,000 | -8,810,307,707,956,978,000 | 37.432432 | 78 | 0.587904 | false |
dennisss/sympy
|
sympy/assumptions/tests/test_assumptions_2.py
|
30
|
1961
|
"""
rename this to test_assumptions.py when the old assumptions system is deleted
"""
from sympy.abc import x, y
from sympy.assumptions.assume import global_assumptions, Predicate
from sympy.assumptions.ask import _extract_facts, Q
from sympy.core import symbols
from sympy.logic.boolalg import Or
from sympy.printing import pretty
def test_equal():
"""Test for equality"""
assert Q.positive(x) == Q.positive(x)
assert Q.positive(x) != ~Q.positive(x)
assert ~Q.positive(x) == ~Q.positive(x)
def test_pretty():
assert pretty(Q.positive(x)) == "Q.positive(x)"
assert pretty(
set([Q.positive, Q.integer])) == "set([Q.integer, Q.positive])"
def test_extract_facts():
a, b = symbols('a b', cls=Predicate)
assert _extract_facts(a(x), x) == a
assert _extract_facts(a(x), y) is None
assert _extract_facts(~a(x), x) == ~a
assert _extract_facts(~a(x), y) is None
assert _extract_facts(a(x) | b(x), x) == a | b
assert _extract_facts(a(x) | ~b(x), x) == a | ~b
assert _extract_facts(a(x) & b(y), x) == a
assert _extract_facts(a(x) & b(y), y) == b
assert _extract_facts(a(x) | b(y), x) == None
assert _extract_facts(~(a(x) | b(y)), x) == ~a
def test_global():
"""Test for global assumptions"""
global_assumptions.add(Q.is_true(x > 0))
assert Q.is_true(x > 0) in global_assumptions
global_assumptions.remove(Q.is_true(x > 0))
assert not Q.is_true(x > 0) in global_assumptions
# same with multiple of assumptions
global_assumptions.add(Q.is_true(x > 0), Q.is_true(y > 0))
assert Q.is_true(x > 0) in global_assumptions
assert Q.is_true(y > 0) in global_assumptions
global_assumptions.clear()
assert not Q.is_true(x > 0) in global_assumptions
assert not Q.is_true(y > 0) in global_assumptions
def test_composite_predicates():
pred = Q.integer | ~Q.positive
assert type(pred(x)) is Or
assert pred(x) == Q.integer(x) | ~Q.positive(x)
|
bsd-3-clause
| -2,972,781,806,966,772,700 | -613,970,944,468,113,500 | 33.403509 | 77 | 0.64049 | false |
hbrunn/OCB
|
addons/account/report/account_partner_balance.py
|
286
|
11049
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from common_report_header import common_report_header
class partner_balance(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
super(partner_balance, self).__init__(cr, uid, name, context=context)
self.account_ids = []
self.localcontext.update( {
'time': time,
'get_fiscalyear': self._get_fiscalyear,
'get_journal': self._get_journal,
'get_filter': self._get_filter,
'get_account': self._get_account,
'get_start_date':self._get_start_date,
'get_end_date':self._get_end_date,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_partners':self._get_partners,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
self.display_partner = data['form'].get('display_partner', 'non-zero_balance')
obj_move = self.pool.get('account.move.line')
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
self.result_selection = data['form'].get('result_selection')
self.target_move = data['form'].get('target_move', 'all')
if (self.result_selection == 'customer' ):
self.ACCOUNT_TYPE = ('receivable',)
elif (self.result_selection == 'supplier'):
self.ACCOUNT_TYPE = ('payable',)
else:
self.ACCOUNT_TYPE = ('payable', 'receivable')
self.cr.execute("SELECT a.id " \
"FROM account_account a " \
"LEFT JOIN account_account_type t " \
"ON (a.type = t.code) " \
"WHERE a.type IN %s " \
"AND a.active", (self.ACCOUNT_TYPE,))
self.account_ids = [a for (a,) in self.cr.fetchall()]
res = super(partner_balance, self).set_context(objects, data, ids, report_type=report_type)
lines = self.lines()
sum_debit = sum_credit = sum_litige = 0
for line in filter(lambda x: x['type'] == 3, lines):
sum_debit += line['debit'] or 0
sum_credit += line['credit'] or 0
sum_litige += line['enlitige'] or 0
self.localcontext.update({
'lines': lambda: lines,
'sum_debit': lambda: sum_debit,
'sum_credit': lambda: sum_credit,
'sum_litige': lambda: sum_litige,
})
return res
def lines(self):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
full_account = []
self.cr.execute(
"SELECT p.ref,l.account_id,ac.name AS account_name,ac.code AS code,p.name, sum(debit) AS debit, sum(credit) AS credit, " \
"CASE WHEN sum(debit) > sum(credit) " \
"THEN sum(debit) - sum(credit) " \
"ELSE 0 " \
"END AS sdebit, " \
"CASE WHEN sum(debit) < sum(credit) " \
"THEN sum(credit) - sum(debit) " \
"ELSE 0 " \
"END AS scredit, " \
"(SELECT sum(debit-credit) " \
"FROM account_move_line l " \
"WHERE partner_id = p.id " \
"AND " + self.query + " " \
"AND blocked = TRUE " \
") AS enlitige " \
"FROM account_move_line l LEFT JOIN res_partner p ON (l.partner_id=p.id) " \
"JOIN account_account ac ON (l.account_id = ac.id)" \
"JOIN account_move am ON (am.id = l.move_id)" \
"WHERE ac.type IN %s " \
"AND am.state IN %s " \
"AND " + self.query + "" \
"GROUP BY p.id, p.ref, p.name,l.account_id,ac.name,ac.code " \
"ORDER BY l.account_id,p.name",
(self.ACCOUNT_TYPE, tuple(move_state)))
res = self.cr.dictfetchall()
if self.display_partner == 'non-zero_balance':
full_account = [r for r in res if r['sdebit'] > 0 or r['scredit'] > 0]
else:
full_account = [r for r in res]
for rec in full_account:
if not rec.get('name', False):
rec.update({'name': _('Unknown Partner')})
## We will now compute Total
subtotal_row = self._add_subtotal(full_account)
return subtotal_row
def _add_subtotal(self, cleanarray):
i = 0
completearray = []
tot_debit = 0.0
tot_credit = 0.0
tot_scredit = 0.0
tot_sdebit = 0.0
tot_enlitige = 0.0
for r in cleanarray:
            # For the first element we always add the line
            # type = 1 if the line is the first one of the account
            # type = 2 for any other line of the account
if i==0:
# We add the first as the header
#
##
new_header = {}
new_header['ref'] = ''
new_header['name'] = r['account_name']
new_header['code'] = r['code']
new_header['debit'] = r['debit']
new_header['credit'] = r['credit']
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = r['debit'] - r['credit']
new_header['type'] = 3
##
completearray.append(new_header)
#
r['type'] = 1
r['balance'] = float(r['sdebit']) - float(r['scredit'])
completearray.append(r)
#
tot_debit = r['debit']
tot_credit = r['credit']
tot_scredit = r['scredit']
tot_sdebit = r['sdebit']
tot_enlitige = (r['enlitige'] or 0.0)
#
else:
if cleanarray[i]['account_id'] <> cleanarray[i-1]['account_id']:
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
# we reset the counter
tot_debit = r['debit']
tot_credit = r['credit']
tot_scredit = r['scredit']
tot_sdebit = r['sdebit']
tot_enlitige = (r['enlitige'] or 0.0)
#
##
new_header = {}
new_header['ref'] = ''
new_header['name'] = r['account_name']
new_header['code'] = r['code']
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
##get_fiscalyear
##
completearray.append(new_header)
##
#
r['type'] = 1
#
r['balance'] = float(r['sdebit']) - float(r['scredit'])
completearray.append(r)
if cleanarray[i]['account_id'] == cleanarray[i-1]['account_id']:
# we reset the counter
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
tot_debit = tot_debit + r['debit']
tot_credit = tot_credit + r['credit']
tot_scredit = tot_scredit + r['scredit']
tot_sdebit = tot_sdebit + r['sdebit']
tot_enlitige = tot_enlitige + (r['enlitige'] or 0.0)
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
#
r['type'] = 2
#
r['balance'] = float(r['sdebit']) - float(r['scredit'])
#
completearray.append(r)
i = i + 1
return completearray
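    # Illustration (not part of the original module): the list returned above interleaves
    # three row types, one type 3 header/subtotal row per account followed by that
    # account's partner lines:
    #   {'type': 3, 'code': '411000', 'name': 'Debtors', 'sdebit': 300.0, ...}
    #   {'type': 1, 'name': 'Partner A', 'balance': 200.0, ...}   first line of the account
    #   {'type': 2, 'name': 'Partner B', 'balance': 100.0, ...}   subsequent lines
    # ('411000', the partner names and the amounts are made-up values.)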
def _get_partners(self):
if self.result_selection == 'customer':
return _('Receivable Accounts')
elif self.result_selection == 'supplier':
return _('Payable Accounts')
elif self.result_selection == 'customer_supplier':
return _('Receivable and Payable Accounts')
return ''
class report_partnerbalance(osv.AbstractModel):
_name = 'report.account.report_partnerbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_partnerbalance'
_wrapped_report_class = partner_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 6,349,240,262,014,174,000 | -6,482,885,877,985,185,000 | 40.69434 | 134 | 0.487284 | false |
haeusser/tensorflow
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
|
5
|
43745
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as seq2seq_lib
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import rnn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class Seq2SeqTest(test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
_, enc_state = core_rnn.static_rnn(
core_rnn_cell_impl.GRUCell(2), inp, dtype=dtypes.float32)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell_impl.OutputProjectionWrapper(
core_rnn_cell_impl.GRUCell(2), 4)
dec, mem = seq2seq_lib.rnn_decoder(dec_inp, enc_state, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell_impl.OutputProjectionWrapper(
core_rnn_cell_impl.GRUCell(2), 4)
dec, mem = seq2seq_lib.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell_impl.OutputProjectionWrapper(
core_rnn_cell_impl.GRUCell(2), 4)
dec, mem = seq2seq_lib.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: core_rnn_cell_impl.BasicLSTMCell(2)
cell = cell_fn()
_, enc_state = core_rnn.static_rnn(cell, inp, dtype=dtypes.float32)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_rnn_decoder(
dec_inp, enc_state, cell_fn(), num_symbols=4, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: core_rnn_cell_impl.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_nt = core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell = functools.partial(
core_rnn_cell_impl.BasicLSTMCell,
2, state_is_tuple=True)
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test when num_decoder_symbols is provided, the size of decoder output
# is num_decoder_symbols.
with variable_scope.variable_scope("decoder_symbols_seq2seq"):
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
num_decoder_symbols=3,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [constant_op.constant(0, dtypes.int32, shape=[2])] * 3
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: core_rnn_cell_impl.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Create a new cell instance for the decoder, since it uses a
# different variable scope
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: core_rnn_cell_impl.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder1(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: core_rnn_cell_impl.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder2(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: core_rnn_cell_impl.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
single_cell = lambda: core_rnn_cell_impl.BasicLSTMCell( # pylint: disable=g-long-lambda
2, state_is_tuple=True)
cell_fn = lambda: core_rnn_cell_impl.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[single_cell() for _ in range(2)], state_is_tuple=True)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testDynamicAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: core_rnn_cell_impl.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[core_rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)])
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: core_rnn_cell_impl.GRUCell(2)
cell = cell_fn()
enc_outputs, enc_state = core_rnn.static_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_attention_decoder(
dec_inp,
enc_state,
attn_states,
cell_fn(),
num_symbols=4,
embedding_size=2,
output_size=3)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: core_rnn_cell_impl.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_fn = functools.partial(
core_rnn_cell_impl.BasicLSTMCell,
2, state_is_tuple=False)
cell_nt = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# # Test that previous-feeding model ignores inputs after the first.
# dec_inp2 = [
# constant_op.constant(
# 0, dtypes.int32, shape=[2]) for _ in range(3)
# ]
# with variable_scope.variable_scope("other"):
# d3, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell_fn(),
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=constant_op.constant(True))
# sess.run([variables.global_variables_initializer()])
# variable_scope.get_variable_scope().reuse_variables()
# cell = cell_fn()
# d1, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# d2, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# res1 = sess.run(d1)
# res2 = sess.run(d2)
# res3 = sess.run(d3)
# self.assertAllClose(res1, res2)
# self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp_dict = {}
dec_inp_dict["0"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
dec_inp_dict["1"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(4)
]
dec_symbols_dict = {"0": 5, "1": 6}
def EncCellFn():
return core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=True)
def DecCellsFn():
return dict(
(k, core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=True))
for k in dec_symbols_dict)
outputs_dict, state_dict = (seq2seq_lib.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(),
2, dec_symbols_dict, embedding_size=2))
sess.run([variables.global_variables_initializer()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
dec_inp_dict2["1"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(4)
]
with variable_scope.variable_scope("other"):
outputs_dict3, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
outputs_dict1, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
outputs_dict2, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.test_session() as sess:
logits = [constant_op.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [
constant_op.constant(
i + 0.5, shape=[2, output_classes]) for i in range(3)
]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = (seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=True))
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# def testModelWithBucketsScopeAndLoss(self):
# """Test variable scope reuse is not reset after model_with_buckets."""
# classes = 10
# buckets = [(4, 4), (8, 8)]
# with self.test_session():
# # Here comes a sample Seq2Seq model using GRU cells.
# def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
# """Example sequence-to-sequence model that uses GRU cells."""
# def GRUSeq2Seq(enc_inp, dec_inp):
# cell = core_rnn_cell_impl.MultiRNNCell(
# [core_rnn_cell_impl.GRUCell(24) for _ in range(2)])
# return seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=classes,
# num_decoder_symbols=classes,
# embedding_size=24)
# targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
# return seq2seq_lib.model_with_buckets(
# enc_inp,
# dec_inp,
# targets,
# weights,
# buckets,
# GRUSeq2Seq,
# per_example_loss=per_example_loss)
# # Now we construct the copy model.
# inp = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# out = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# weights = [
# array_ops.ones_like(
# inp[0], dtype=dtypes.float32) for _ in range(8)
# ]
# with variable_scope.variable_scope("root"):
# _, losses1 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=False)
# # Now check that we did not accidentally set reuse.
# self.assertEqual(False, variable_scope.get_variable_scope().reuse)
# with variable_scope.variable_scope("new"):
  #       _, losses2 = SampleGRUSeq2Seq(
  #           inp, out, weights, per_example_loss=True)
  #       # First loss is scalar, the second one is a 1-dimensional tensor.
# self.assertEqual([], losses1[0].get_shape().as_list())
# self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.test_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = variable_scope.get_variable("proj_w", [24, classes])
w_t = array_ops.transpose(w)
b = variable_scope.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = core_rnn_cell_impl.MultiRNNCell(
[core_rnn_cell_impl.GRUCell(24) for _ in range(2)],
state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=classes,
num_decoder_symbols=classes,
embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
def SampledLoss(labels, inputs):
labels = array_ops.reshape(labels, [-1, 1])
return nn_impl.sampled_softmax_loss(
weights=w_t,
biases=b,
labels=labels,
inputs=inputs,
num_sampled=8,
num_classes=classes)
return seq2seq_lib.model_with_buckets(
enc_inp,
dec_inp,
targets,
weights,
buckets,
GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
out = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
weights = [
array_ops.ones_like(
inp[0], dtype=dtypes.float32) for _ in range(8)
]
with variable_scope.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = variables.all_variables()
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = gradients_impl.gradients(losses[i], params)
grads, _ = clip_ops.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([variables.global_variables_initializer()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [
np.array(
[np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)
]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length],
o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], # 10% margin of error.
1.1 * perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
        an `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.test_session(graph=ops.Graph()) as sess:
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_enc_timesteps)
]
dec_inp_fp_true = [
constant_op.constant(
i, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
dec_inp_holder_fp_false = [
array_ops.placeholder(
dtypes.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)
]
targets = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
weights = [
constant_op.constant(
1.0, shape=[batch_size]) for i in range(num_dec_timesteps)
]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with variable_scope.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope_name)
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
seq2seq_lib.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
dec_op_fp_false, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(variables.global_variables_initializer())
# We only check consistencies between the variables existing in both
# the models with True and False feed_previous. Variables created by
# the loop_function in the model with True feed_previous are ignored.
v_false_name_dict = {
v.name.split("/", 1)[-1]: v
for v in variables_fp_false
}
matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(state_ops.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false, {
holder: inp
for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)
})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = core_rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
if __name__ == "__main__":
test.main()
|
apache-2.0
| -8,209,832,853,401,231,000 | 9,210,941,453,221,846,000 | 38.09294 | 96 | 0.573048 | false |
qizenguf/MLC-STT
|
src/mem/AddrMapper.py
|
69
|
3530
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.params import *
from MemObject import MemObject
# An address mapper changes the packet addresses in going from the
# slave port side of the mapper to the master port side. When the
# slave port is queried for the address ranges, it also performs the
# necessary range updates. Note that snoop requests that travel from
# the master port (i.e. the memory side) to the slave port are
# currently not modified.
class AddrMapper(MemObject):
type = 'AddrMapper'
cxx_header = 'mem/addr_mapper.hh'
abstract = True
# one port in each direction
master = MasterPort("Master port")
slave = SlavePort("Slave port")
# Range address mapper that maps a set of original ranges to a set of
# remapped ranges, where a specific range is of the same size
# (original and remapped), only with an offset.
class RangeAddrMapper(AddrMapper):
type = 'RangeAddrMapper'
cxx_header = 'mem/addr_mapper.hh'
# These two vectors should be the exact same length and each range
# should be the exact same size. Each range in original_ranges is
# mapped to the corresponding element in the remapped_ranges. Note
# that the same range can occur multiple times in the remapped
# ranges for address aliasing.
original_ranges = VectorParam.AddrRange(
"Ranges of memory that should me remapped")
remapped_ranges = VectorParam.AddrRange(
"Ranges of memory that are being mapped to")
|
bsd-3-clause
| -46,460,134,038,600,250 | 1,500,607,412,038,484,700 | 48.027778 | 72 | 0.770822 | false |
9929105/KEEP
|
keep_backend/tests/test_api/test_data_api.py
|
2
|
3603
|
'''
Makes HTTP requests to each of our Data API functions to ensure there are
no templating/etc errors on the pages.
'''
import json
from tests import ApiTestCase
class DataApiV1KeyTests( ApiTestCase ):
AUTH_DETAILS = { 'format': 'json',
'user': 'admin',
'key': '35f7d1fb1890bdc05f9988d01cf1dcab' }
INVALID_AUTH = { 'format': 'json',
'user': 'admin',
'key': 'invalid_key' }
AUTH_DETAILS_OTHER = { 'format': 'json',
'user': 'test_user',
'key': '35f7d1fb1890bdc05f9988d01cf1dcab' }
def test_data_detail( self ):
'''
Test if we can list the repo details for a specific repo for a
test user.
'''
# Get the list of repos for the test user
repos = self.open( '/repos/', self.AUTH_DETAILS )
repos = json.loads( repos.content )
CSV_AUTH = dict( self.AUTH_DETAILS )
CSV_AUTH[ 'format' ] = 'csv'
for repo in repos.get( 'objects' ):
response = self.open( '/data/%s/' % ( repo.get( 'id' ) ), self.AUTH_DETAILS )
assert response.status_code == 200
response = self.open( '/data/%s/' % ( repo.get( 'id' ) ), CSV_AUTH )
assert response.status_code == 200
def test_data_detail_failures( self ):
'''
Test failure state when querying for repo data under a
non-permitted user
'''
# Get the list of repos for the test user
repos = self.open( '/repos/', self.AUTH_DETAILS )
repos = json.loads( repos.content )
for repo in repos.get( 'objects' ):
# Test valid user/key
response = self.open( '/data/%s/' % ( repo.get( 'id' ) ), self.AUTH_DETAILS_OTHER )
if repo.get( 'is_public' ):
assert response.status_code == 200
else:
assert response.status_code == 401
# Test invalid user/key
response = self.open( '/data/%s/' % ( repo.get( 'id' ) ), self.INVALID_AUTH )
assert response.status_code == 401
def test_data_post( self ):
'''
Test if we can successfully post data to the API for a repo.
'''
# Get the list of repos for the test user
repos = self.open( '/repos/', self.AUTH_DETAILS )
repos = json.loads( repos.content )
for repo in repos.get( 'objects' ):
# Grab the list of datapoints for this repo
response = self.open( '/data/%s/' % ( repo.get( 'id' ) ), self.AUTH_DETAILS )
response = json.loads( response.content )
beforeCount = response.get( 'meta' ).get( 'count' )
new_data = { 'name': 'Bob Dole',
'age': 20,
'gender': 'male',
'user': self.AUTH_DETAILS[ 'user' ],
'key': self.AUTH_DETAILS[ 'key' ],
'format': self.AUTH_DETAILS[ 'format' ] }
response = self.open( '/repos/%s/' % ( repo.get( 'id' ) ),
new_data,
method='POST' )
response = json.loads( response.content )
assert response.get( 'success' )
response = self.open( '/data/%s/' % ( repo.get( 'id' ) ), self.AUTH_DETAILS )
response = json.loads( response.content )
assert response.get( 'meta' ).get( 'count' ) == ( beforeCount + 1 )
|
mit
| -8,240,057,048,352,678,000 | -2,195,663,091,364,988,000 | 35.03 | 95 | 0.498751 | false |
nickoe/asciidoc
|
asciidocapi.py
|
85
|
8424
|
#!/usr/bin/env python
"""
asciidocapi - AsciiDoc API wrapper class.
The AsciiDocAPI class provides an API for executing asciidoc. Minimal example
compiles `mydoc.txt` to `mydoc.html`:
import asciidocapi
asciidoc = asciidocapi.AsciiDocAPI()
asciidoc.execute('mydoc.txt')
- Full documentation in asciidocapi.txt.
- See the doctests below for more examples.
Doctests:
1. Check execution:
>>> import StringIO
>>> infile = StringIO.StringIO('Hello *{author}*')
>>> outfile = StringIO.StringIO()
>>> asciidoc = AsciiDocAPI()
>>> asciidoc.options('--no-header-footer')
>>> asciidoc.attributes['author'] = 'Joe Bloggs'
>>> asciidoc.execute(infile, outfile, backend='html4')
>>> print outfile.getvalue()
<p>Hello <strong>Joe Bloggs</strong></p>
>>> asciidoc.attributes['author'] = 'Bill Smith'
>>> infile = StringIO.StringIO('Hello _{author}_')
>>> outfile = StringIO.StringIO()
>>> asciidoc.execute(infile, outfile, backend='docbook')
>>> print outfile.getvalue()
<simpara>Hello <emphasis>Bill Smith</emphasis></simpara>
2. Check error handling:
>>> import StringIO
>>> asciidoc = AsciiDocAPI()
>>> infile = StringIO.StringIO('---------')
>>> outfile = StringIO.StringIO()
>>> asciidoc.execute(infile, outfile)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "asciidocapi.py", line 189, in execute
raise AsciiDocError(self.messages[-1])
AsciiDocError: ERROR: <stdin>: line 1: [blockdef-listing] missing closing delimiter
Copyright (C) 2009 Stuart Rackham. Free use of this software is granted
under the terms of the GNU General Public License (GPL).
"""
import sys,os,re,imp
API_VERSION = '0.1.2'
MIN_ASCIIDOC_VERSION = '8.4.1' # Minimum acceptable AsciiDoc version.
def find_in_path(fname, path=None):
"""
Find file fname in paths. Return None if not found.
"""
if path is None:
path = os.environ.get('PATH', '')
for dir in path.split(os.pathsep):
fpath = os.path.join(dir, fname)
if os.path.isfile(fpath):
return fpath
else:
return None
class AsciiDocError(Exception):
pass
class Options(object):
"""
Stores asciidoc(1) command options.
"""
def __init__(self, values=[]):
self.values = values[:]
def __call__(self, name, value=None):
"""Shortcut for append method."""
self.append(name, value)
def append(self, name, value=None):
if type(value) in (int,float):
value = str(value)
self.values.append((name,value))
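# Illustrative example (not part of the original module): Options simply
# accumulates (name, value) tuples that are later handed to asciidoc, e.g.
#
#   opts = Options()
#   opts('--no-header-footer')           # append via __call__
#   opts('--out-file', 'mydoc.html')
#   opts.values == [('--no-header-footer', None), ('--out-file', 'mydoc.html')]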
class Version(object):
"""
Parse and compare AsciiDoc version numbers. Instance attributes:
string: String version number '<major>.<minor>[.<micro>][suffix]'.
major: Integer major version number.
minor: Integer minor version number.
micro: Integer micro version number.
suffix: Suffix (begins with non-numeric character) is ignored when
comparing.
Doctest examples:
>>> Version('8.2.5') < Version('8.3 beta 1')
True
>>> Version('8.3.0') == Version('8.3. beta 1')
True
>>> Version('8.2.0') < Version('8.20')
True
>>> Version('8.20').major
8
>>> Version('8.20').minor
20
>>> Version('8.20').micro
0
>>> Version('8.20').suffix
''
>>> Version('8.20 beta 1').suffix
'beta 1'
"""
def __init__(self, version):
self.string = version
reo = re.match(r'^(\d+)\.(\d+)(\.(\d+))?\s*(.*?)\s*$', self.string)
if not reo:
raise ValueError('invalid version number: %s' % self.string)
groups = reo.groups()
self.major = int(groups[0])
self.minor = int(groups[1])
self.micro = int(groups[3] or '0')
self.suffix = groups[4] or ''
def __cmp__(self, other):
result = cmp(self.major, other.major)
if result == 0:
result = cmp(self.minor, other.minor)
if result == 0:
result = cmp(self.micro, other.micro)
return result
class AsciiDocAPI(object):
"""
AsciiDoc API class.
"""
def __init__(self, asciidoc_py=None):
"""
Locate and import asciidoc.py.
Initialize instance attributes.
"""
self.options = Options()
self.attributes = {}
self.messages = []
# Search for the asciidoc command file.
# Try ASCIIDOC_PY environment variable first.
cmd = os.environ.get('ASCIIDOC_PY')
if cmd:
if not os.path.isfile(cmd):
raise AsciiDocError('missing ASCIIDOC_PY file: %s' % cmd)
elif asciidoc_py:
# Next try path specified by caller.
cmd = asciidoc_py
if not os.path.isfile(cmd):
raise AsciiDocError('missing file: %s' % cmd)
else:
# Try shell search paths.
for fname in ['asciidoc.py','asciidoc.pyc','asciidoc']:
cmd = find_in_path(fname)
if cmd: break
else:
# Finally try current working directory.
for cmd in ['asciidoc.py','asciidoc.pyc','asciidoc']:
if os.path.isfile(cmd): break
else:
raise AsciiDocError('failed to locate asciidoc')
self.cmd = os.path.realpath(cmd)
self.__import_asciidoc()
def __import_asciidoc(self, reload=False):
'''
Import asciidoc module (script or compiled .pyc).
See
http://groups.google.com/group/asciidoc/browse_frm/thread/66e7b59d12cd2f91
for an explanation of why a seemingly straight-forward job turned out
quite complicated.
'''
if os.path.splitext(self.cmd)[1] in ['.py','.pyc']:
sys.path.insert(0, os.path.dirname(self.cmd))
try:
try:
if reload:
import __builtin__ # Because reload() is shadowed.
__builtin__.reload(self.asciidoc)
else:
import asciidoc
self.asciidoc = asciidoc
except ImportError:
raise AsciiDocError('failed to import ' + self.cmd)
finally:
del sys.path[0]
else:
# The import statement can only handle .py or .pyc files, have to
# use imp.load_source() for scripts with other names.
try:
imp.load_source('asciidoc', self.cmd)
import asciidoc
self.asciidoc = asciidoc
except ImportError:
raise AsciiDocError('failed to import ' + self.cmd)
if Version(self.asciidoc.VERSION) < Version(MIN_ASCIIDOC_VERSION):
raise AsciiDocError(
'asciidocapi %s requires asciidoc %s or better'
% (API_VERSION, MIN_ASCIIDOC_VERSION))
def execute(self, infile, outfile=None, backend=None):
"""
Compile infile to outfile using backend format.
        infile and outfile can be file path strings or file-like objects.
"""
self.messages = []
opts = Options(self.options.values)
if outfile is not None:
opts('--out-file', outfile)
if backend is not None:
opts('--backend', backend)
for k,v in self.attributes.items():
if v == '' or k[-1] in '!@':
s = k
elif v is None: # A None value undefines the attribute.
s = k + '!'
else:
s = '%s=%s' % (k,v)
opts('--attribute', s)
args = [infile]
# The AsciiDoc command was designed to process source text then
        # exit; there are globals and statics in asciidoc.py that have
# to be reinitialized before each run -- hence the reload.
self.__import_asciidoc(reload=True)
try:
try:
self.asciidoc.execute(self.cmd, opts.values, args)
finally:
self.messages = self.asciidoc.messages[:]
except SystemExit, e:
if e.code:
raise AsciiDocError(self.messages[-1])
if __name__ == "__main__":
"""
Run module doctests.
"""
import doctest
options = doctest.NORMALIZE_WHITESPACE + doctest.ELLIPSIS
doctest.testmod(optionflags=options)
|
gpl-2.0
| -7,448,942,287,910,984,000 | -1,636,149,398,558,169,600 | 31.77821 | 86 | 0.567426 | false |
arcticshores/kivy
|
kivy/core/image/img_pil.py
|
37
|
3535
|
'''
PIL: PIL image loader
'''
__all__ = ('ImageLoaderPIL', )
try:
from PIL import Image as PILImage
except:
import Image as PILImage
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderPIL(ImageLoaderBase):
'''Image loader based on the PIL library.
.. versionadded:: 1.0.8
Support for GIF animation added.
    Gif animation has a lot of issues (transparency/color depths, etc.).
    In order to keep it simple, what is implemented here is what is
    natively supported by the PIL library.
    As a general rule, try to use gifs that have no transparency.
    Gifs with transparency will work, but be prepared for some
artifacts until transparency support is improved.
'''
@staticmethod
def can_save():
return True
@staticmethod
def can_load_memory():
return True
@staticmethod
def extensions():
'''Return accepted extensions for this loader'''
# See http://www.pythonware.com/library/pil/handbook/index.htm
return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',
'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',
'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',
'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',
'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',
'xv')
def _img_correct(self, _img_tmp):
'''Convert image to the correct format and orientation.
'''
# image loader work only with rgb/rgba image
if _img_tmp.mode.lower() not in ('rgb', 'rgba'):
try:
imc = _img_tmp.convert('RGBA')
except:
Logger.warning(
'Image: Unable to convert image to rgba (was %s)' %
(_img_tmp.mode.lower()))
raise
_img_tmp = imc
return _img_tmp
def _img_read(self, im):
'''Read images from an animated file.
'''
im.seek(0)
# Read all images inside
try:
img_ol = None
while True:
img_tmp = im
img_tmp = self._img_correct(img_tmp)
if img_ol and (hasattr(im, 'dispose') and not im.dispose):
# paste new frame over old so as to handle
# transparency properly
img_ol.paste(img_tmp, (0, 0), img_tmp)
img_tmp = img_ol
img_ol = img_tmp
yield ImageData(img_tmp.size[0], img_tmp.size[1],
img_tmp.mode.lower(), img_tmp.tostring())
im.seek(im.tell() + 1)
except EOFError:
pass
def load(self, filename):
try:
im = PILImage.open(filename)
except:
Logger.warning('Image: Unable to load image <%s>' % filename)
raise
# update internals
if not self._inline:
self.filename = filename
        # returns a list of ImageData objects (length 1 if not a sequence image)
return list(self._img_read(im))
@staticmethod
def save(filename, width, height, fmt, pixels, flipped=False):
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
if flipped:
image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
image.save(filename)
return True
# register
ImageLoader.register(ImageLoaderPIL)
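# Illustrative usage sketch (not part of the original module); the file name
# below is made up. Once registered, this loader is picked automatically by
# the core image machinery when PIL supports the extension, e.g.:
#
#   from kivy.core.image import Image as CoreImage
#   im = CoreImage('picture.png')   # dispatched to ImageLoaderPIL if available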
|
mit
| -8,128,327,960,140,617,000 | 3,175,220,231,427,503,000 | 30.283186 | 74 | 0.54512 | false |
ksachs/invenio
|
modules/bibindex/lib/bibindex_engine.py
|
1
|
101146
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009,
## 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibIndex indexing engine implementation.
See bibindex executable for entry point.
"""
__revision__ = "$Id$"
import re
import sys
import time
import fnmatch
import inspect
from datetime import datetime
from invenio.config import CFG_SOLR_URL
from invenio.bibindex_engine_config import CFG_MAX_MYSQL_THREADS, \
CFG_MYSQL_THREAD_TIMEOUT, \
CFG_CHECK_MYSQL_THREADS, \
CFG_BIBINDEX_INDEX_TABLE_TYPE, \
CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR, \
CFG_BIBINDEX_UPDATE_MESSAGE, \
CFG_BIBINDEX_UPDATE_MODE, \
CFG_BIBINDEX_TOKENIZER_TYPE, \
CFG_BIBINDEX_WASH_INDEX_TERMS, \
CFG_BIBINDEX_SPECIAL_TAGS
from invenio.bibauthority_config import \
CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC
from invenio.bibauthority_engine import \
get_control_nos_from_recID
from invenio.search_engine import perform_request_search, \
get_index_stemming_language, \
get_synonym_terms, \
search_pattern, \
search_unit_in_bibrec
from invenio.dbquery import run_sql, DatabaseError, serialize_via_marshal, \
deserialize_via_marshal, wash_table_column_name
from invenio.bibindex_engine_washer import wash_index_term
from invenio.bibtask import task_init, write_message, get_datetime, \
task_set_option, task_get_option, task_get_task_param, \
task_update_progress, task_sleep_now_if_required
from invenio.intbitset import intbitset
from invenio.errorlib import register_exception
from invenio.solrutils_bibindex_indexer import solr_commit
from invenio.bibindex_tokenizers.BibIndexJournalTokenizer import \
CFG_JOURNAL_TAG, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM_REGEXP_CHECK
from invenio.bibindex_termcollectors import TermCollector
from invenio.bibindex_engine_utils import load_tokenizers, \
get_all_index_names_and_column_values, \
get_index_tags, \
get_field_tags, \
get_marc_tag_indexes, \
get_nonmarc_tag_indexes, \
get_all_indexes, \
get_index_virtual_indexes, \
get_virtual_index_building_blocks, \
get_index_id_from_index_name, \
run_sql_drop_silently, \
get_min_last_updated, \
remove_inexistent_indexes, \
get_all_synonym_knowledge_bases, \
get_index_remove_stopwords, \
get_index_remove_html_markup, \
get_index_remove_latex_markup, \
filter_for_virtual_indexes, \
get_records_range_for_index, \
make_prefix, \
list_union, \
recognize_marc_tag
from invenio.bibindex_termcollectors import \
TermCollector, \
NonmarcTermCollector
from invenio.memoiseutils import Memoise
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## precompile some often-used regexp for speed reasons:
re_subfields = re.compile('\$\$\w')
re_datetime_shift = re.compile("([-\+]{0,1})([\d]+)([dhms])")
re_prefix = re.compile('__[a-zA-Z1-9]*__')
nb_char_in_line = 50 # for verbose pretty printing
chunksize = 1000 # default size of chunks that the records will be treated by
base_process_size = 4500 # process base size
_last_word_table = None
_TOKENIZERS = load_tokenizers()
def list_unique(_list):
"""Returns a _list with duplicates removed."""
_dict = {}
for e in _list:
_dict[e] = 1
return _dict.keys()
## safety function for killing slow DB threads:
def kill_sleepy_mysql_threads(max_threads=CFG_MAX_MYSQL_THREADS,
thread_timeout=CFG_MYSQL_THREAD_TIMEOUT):
"""Check the number of DB threads and if there are more than
    MAX_THREADS of them, kill all threads that are in a sleeping
    state for more than THREAD_TIMEOUT seconds. (This is useful
    for working around the max_connection problem that appears
during indexation in some not-yet-understood cases.) If some
threads are to be killed, write info into the log file.
"""
res = run_sql("SHOW FULL PROCESSLIST")
if len(res) > max_threads:
for row in res:
r_id, dummy, dummy, dummy, r_command, r_time, dummy, dummy = row
if r_command == "Sleep" and int(r_time) > thread_timeout:
run_sql("KILL %s", (r_id, ))
write_message("WARNING: too many DB threads, " + \
"killing thread %s" % r_id, verbose=1)
return
def get_associated_subfield_value(recID, tag, value, associated_subfield_code):
"""Return list of ASSOCIATED_SUBFIELD_CODE, if exists, for record
RECID and TAG of value VALUE. Used by fulltext indexer only.
Note: TAG must be 6 characters long (tag+ind1+ind2+sfcode),
    otherwise an empty string is returned.
FIXME: what if many tag values have the same value but different
associated_subfield_code? Better use bibrecord library for this.
"""
out = ""
if len(tag) != 6:
return out
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT bb.field_number, b.tag, b.value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE
%%s%%""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag[:-1]))
field_number = -1
for row in res:
if row[1] == tag and row[2] == value:
field_number = row[0]
if field_number > 0:
for row in res:
if row[0] == field_number and row[1] == tag[:-1] + associated_subfield_code:
out = row[2]
break
return out
def get_author_canonical_ids_for_recid(recID):
"""
Return list of author canonical IDs (e.g. `J.Ellis.1') for the
given record. Done by consulting BibAuthorID module.
"""
return [word[0] for word in run_sql("""SELECT data FROM aidPERSONIDDATA
JOIN aidPERSONIDPAPERS USING (personid) WHERE bibrec=%s AND
tag='canonical_name' AND flag>-2""", (recID, ))]
def swap_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Atomically swap reindexed temporary table with the original one.
Delete the now-old one."""
write_message("Putting new tmp index tables " + \
"for id %s into production" % index_id)
run_sql(
"RENAME TABLE " +
"idxWORD%02dR TO old_idxWORD%02dR," % (index_id, index_id) +
"%sidxWORD%02dR TO idxWORD%02dR," % (reindex_prefix, index_id, index_id) +
"idxWORD%02dF TO old_idxWORD%02dF," % (index_id, index_id) +
"%sidxWORD%02dF TO idxWORD%02dF," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dR TO old_idxPAIR%02dR," % (index_id, index_id) +
"%sidxPAIR%02dR TO idxPAIR%02dR," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dF TO old_idxPAIR%02dF," % (index_id, index_id) +
"%sidxPAIR%02dF TO idxPAIR%02dF," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dR TO old_idxPHRASE%02dR," % (index_id, index_id) +
"%sidxPHRASE%02dR TO idxPHRASE%02dR," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dF TO old_idxPHRASE%02dF," % (index_id, index_id) +
"%sidxPHRASE%02dF TO idxPHRASE%02dF;" % (reindex_prefix, index_id, index_id)
)
write_message("Dropping old index tables for id %s" % index_id)
run_sql_drop_silently("""DROP TABLE old_idxWORD%02dR,
old_idxWORD%02dF,
old_idxPAIR%02dR,
old_idxPAIR%02dF,
old_idxPHRASE%02dR,
old_idxPHRASE%02dF""" % ((index_id, )* 6)
) # kwalitee: disable=sql
def init_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Create reindexing temporary tables."""
write_message("Creating new tmp index tables for id %s" % index_id)
query = """DROP TABLE IF EXISTS %sidxWORD%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(50) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxWORD%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(100) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term text default NULL,
hitlist longblob,
PRIMARY KEY (id),
KEY term (term(50))
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dR (
id_bibrec mediumint(9) unsigned NOT NULL default '0',
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
def remove_subfields(s):
"Removes subfields from string, e.g. 'foo $$c bar' becomes 'foo bar'."
return re_subfields.sub(' ', s)
def get_field_indexes(field):
"""Returns indexes names and ids corresponding to the given field"""
if recognize_marc_tag(field):
#field is actually a tag
return get_marc_tag_indexes(field, virtual=False)
else:
return get_nonmarc_tag_indexes(field, virtual=False)
get_field_indexes_memoised = Memoise(get_field_indexes)
def get_index_tokenizer(index_id):
"""Returns value of a tokenizer field from idxINDEX database table
@param index_id: id of the index
"""
query = "SELECT tokenizer FROM idxINDEX WHERE id=%s" % index_id
out = None
try:
res = run_sql(query)
if res:
out = _TOKENIZERS[res[0][0]]
except DatabaseError:
write_message("Exception caught for SQL statement: %s; " + \
"column tokenizer might not exist" % query, sys.stderr)
except KeyError:
write_message("Exception caught: there is no such tokenizer")
out = None
return out
def detect_tokenizer_type(tokenizer):
"""
Checks what is the main type of the tokenizer.
For more information on tokenizer types take
a look at BibIndexTokenizer class.
@param tokenizer: instance of a tokenizer
"""
from invenio.bibindex_tokenizers.BibIndexStringTokenizer import BibIndexStringTokenizer
from invenio.bibindex_tokenizers.BibIndexRecJsonTokenizer import BibIndexRecJsonTokenizer
from invenio.bibindex_tokenizers.BibIndexMultiFieldTokenizer import BibIndexMultiFieldTokenizer
tokenizer_inheritance_tree = inspect.getmro(tokenizer.__class__)
if BibIndexStringTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['string']
if BibIndexMultiFieldTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['multifield']
if BibIndexRecJsonTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['recjson']
return CFG_BIBINDEX_TOKENIZER_TYPE['unknown']
def get_last_updated_all_indexes():
"""Returns last modification date for all defined indexes"""
query= """SELECT name, last_updated FROM idxINDEX"""
res = run_sql(query)
return res
def split_ranges(parse_string):
"""Parse a string a return the list or ranges."""
recIDs = []
ranges = parse_string.split(",")
for arange in ranges:
tmp_recIDs = arange.split("-")
if len(tmp_recIDs) == 1:
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[0])])
else:
if int(tmp_recIDs[0]) > int(tmp_recIDs[1]): # sanity check
tmp = tmp_recIDs[0]
tmp_recIDs[0] = tmp_recIDs[1]
tmp_recIDs[1] = tmp
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[1])])
return recIDs
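# Illustrative example (not part of the original module), following directly
# from the parsing logic above:
#
#   split_ranges("1-5,7")  -> [[1, 5], [7, 7]]
#   split_ranges("10-3")   -> [[3, 10]]   (bounds swapped by the sanity check)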
def get_date_range(var):
"Returns the two dates contained as a low,high tuple"
limits = var.split(",")
if len(limits) == 1:
low = get_datetime(limits[0])
return low, None
if len(limits) == 2:
low = get_datetime(limits[0])
high = get_datetime(limits[1])
return low, high
return None, None
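# Illustrative example (not part of the original module), assuming get_datetime
# parses date strings such as '2012-01-01 00:00:00':
#
#   get_date_range("2012-01-01 00:00:00")                      -> (low, None)
#   get_date_range("2012-01-01 00:00:00,2012-12-31 23:59:59")  -> (low, high)
#   get_date_range("a,b,c")                                    -> (None, None)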
def create_range_list(res):
"""Creates a range list from a recID select query result contained
in res. The result is expected to have ascending numerical order."""
if not res:
return []
row = res[0]
if not row:
return []
else:
range_list = [[row, row]]
for row in res[1:]:
row_id = row
if row_id == range_list[-1][1] + 1:
range_list[-1][1] = row_id
else:
range_list.append([row_id, row_id])
return range_list
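# Illustrative usage sketch: create_range_list() compresses an ascending list
# of recIDs into consecutive [low, high] ranges:
#     create_range_list([1, 2, 3, 7, 8, 10])  ->  [[1, 3], [7, 8], [10, 10]]
#     create_range_list([])                   ->  []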
def beautify_range_list(range_list):
"""Returns a non overlapping, maximal range list"""
ret_list = []
for new in range_list:
found = 0
for old in ret_list:
if new[0] <= old[0] <= new[1] + 1 or new[0] - 1 <= old[1] <= new[1]:
old[0] = min(old[0], new[0])
old[1] = max(old[1], new[1])
found = 1
break
if not found:
ret_list.append(new)
return ret_list
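# Illustrative usage sketch: beautify_range_list() merges overlapping and
# adjacent ranges (note the +1/-1 slack in the comparison above):
#     beautify_range_list([[1, 3], [4, 6], [10, 12]])  ->  [[1, 6], [10, 12]]
# The inner lists of the input may be modified in place, so callers should not
# rely on them afterwards.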
def truncate_index_table(index_name):
"""Properly truncate the given index."""
index_id = get_index_id_from_index_name(index_name)
if index_id:
write_message('Truncating %s index table in order to reindex.' % \
index_name, verbose=2)
run_sql("""UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00'
WHERE id=%s""", (index_id, ))
run_sql("TRUNCATE idxWORD%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxWORD%02dR" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dR" % index_id) # kwalitee: disable=sql
def update_index_last_updated(indexes, starting_time=None):
"""Update last_updated column of the index table in the database.
Puts starting time there so that if the task
was interrupted for record download,
the records will be reindexed next time.
@param indexes: list of indexes names
"""
if starting_time is None:
return None
for index_name in indexes:
write_message("updating last_updated to %s...for %s index" % \
(starting_time, index_name), verbose=9)
run_sql("UPDATE idxINDEX SET last_updated=%s WHERE name=%s",
(starting_time, index_name))
def get_percentage_completed(num_done, num_total):
""" Return a string containing the approx. percentage completed """
percentage_done = 100.0 * float(num_done) / float(num_total)
if percentage_done:
percentage_display = "(%.1f%%)" % (percentage_done, )
else:
percentage_display = ""
return percentage_display
def _fill_dict_of_indexes_with_empty_sets():
"""find_affected_records internal function.
Creates dict: {'index_name1':set([]), ...}
"""
index_dict = {}
tmp_all_indexes = get_all_indexes(virtual=False)
for index in tmp_all_indexes:
index_dict[index] = set([])
return index_dict
def find_affected_records_for_index(indexes=[], recIDs=[], force_all_indexes=False):
"""
Function checks which records need to be changed/reindexed
for given index/indexes.
Makes use of hstRECORD table where
different revisions of record are kept.
If parameter force_all_indexes is set
function will assign all recIDs to all indexes.
@param indexes: names of indexes for reindexing, separated by commas
@param recIDs: recIDs for reindexation in form:
[[range1_down, range1_up],[range2_down, range2_up]..]
@param force_all_indexes: should we index all indexes?
"""
tmp_dates = dict(get_last_updated_all_indexes())
modification_dates = dict([(index_name, tmp_dates[index_name] or datetime(1000, 1, 1, 1, 1, 1))
for index_name in tmp_dates])
tmp_all_indexes = get_all_indexes(virtual=False)
indexes = remove_inexistent_indexes(indexes, leave_virtual=False)
if not indexes:
return {}
def _should_reindex_for_revision(index_name, revision_date):
try:
if modification_dates[index_name] < revision_date and \
index_name in indexes:
return True
return False
except KeyError:
return False
if force_all_indexes:
records_for_indexes = {}
all_recIDs = []
for recIDs_range in recIDs:
all_recIDs.extend(range(recIDs_range[0], recIDs_range[1]+1))
if all_recIDs:
for index in indexes:
records_for_indexes[index] = all_recIDs
return records_for_indexes
min_last_updated = get_min_last_updated(indexes)[0][0] or \
datetime(1000, 1, 1, 1, 1, 1)
indexes_to_change = _fill_dict_of_indexes_with_empty_sets()
recIDs_info = []
for recIDs_range in recIDs:
query = """SELECT id_bibrec,job_date,affected_fields FROM hstRECORD
WHERE id_bibrec BETWEEN %s AND %s AND
job_date > '%s'""" % \
(recIDs_range[0], recIDs_range[1], min_last_updated)
res = run_sql(query)
if res:
recIDs_info.extend(res)
for recID_info in recIDs_info:
recID, revision, affected_fields = recID_info
affected_fields = affected_fields.split(",")
indexes_for_recID = set()
for field in affected_fields:
if field:
field_indexes = get_field_indexes_memoised(field) or []
indexes_names = set([idx[1] for idx in field_indexes])
indexes_for_recID |= indexes_names
else:
# record was inserted, all fields were changed,
# no specific affected fields
indexes_for_recID |= set(tmp_all_indexes)
indexes_for_recID_filtered = [ind for ind in indexes_for_recID if _should_reindex_for_revision(ind, revision)]
for index in indexes_for_recID_filtered:
indexes_to_change[index].add(recID)
indexes_to_change = dict((k, list(sorted(v))) for k, v in indexes_to_change.iteritems() if v)
return indexes_to_change
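# Illustrative sketch of the return value (hypothetical values): a dictionary
# mapping index names to the sorted recIDs that need reindexing for that index:
#     find_affected_records_for_index(["title", "author"], [[1, 100]])
#         ->  {'title': [3, 17, 99], 'author': [17]}
# Indexes with nothing to reindex are left out of the dictionary entirely.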
def chunk_generator(rng):
"""
Splits one range into several smaller ones
with respect to global chunksize variable.
@param rng: range of records
@type rng: list in the form: [1, 2000]
"""
global chunksize
current_low = rng[0]
current_high = rng[0]
if rng[0] is None or rng[1] is None:
raise StopIteration
if rng[1] - rng[0] + 1 <= chunksize:
yield rng
else:
while current_high - 1 < rng[1]:
current_high += chunksize
yield current_low, min(current_high - 1, rng[1])
current_low += chunksize
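# Illustrative usage sketch (assuming the global 'chunksize' has been set
# elsewhere in this module, e.g. to 5000):
#     list(chunk_generator([1, 12000]))
#         ->  [(1, 5000), (5001, 10000), (10001, 12000)]
#     list(chunk_generator([1, 10]))   ->  [[1, 10]]   (fits in a single chunk)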
class AbstractIndexTable(object):
"""
This class represents an index table in database.
An index consists of three different kinds of tables:
a table which stores single words,
a table which stores pairs of words and
a table which stores whole phrases.
The class represents only one table. Another instance of
the class must be created in order to store different
type of terms.
This class is an abstract class. It contains methods
to connect to db and methods which facilitate
inserting/modifying/removing terms from it. The class
also contains methods which help managing the memory.
All specific methods for indexing can be found in corresponding
classes for virtual and regular indexes.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
self.index_name = index_name
self.index_id = get_index_id_from_index_name(index_name)
self.table_type = table_type
self.wash_index_terms = wash_index_terms
self.table_name = wash_table_column_name(table_prefix + \
"idx" + \
table_type + \
("%02d" % self.index_id) + "F")
self.table_prefix = table_prefix
self.value = {} # cache
self.recIDs_in_mem = []
def put_into_db(self, mode="normal"):
"""Updates the current words table in the corresponding DB
idxFOO table. Mode 'normal' means normal execution,
mode 'emergency' means words index reverting to old state.
"""
write_message("%s %s wordtable flush started" % \
(self.table_name, mode))
write_message('...updating %d words into %s started' % \
(len(self.value), self.table_name))
task_update_progress("(%s:%s) flushed %d/%d words" % \
(self.table_name, self.index_name, 0, len(self.value)))
self.recIDs_in_mem = beautify_range_list(self.recIDs_in_mem)
tab_name = self.table_name[:-1] + "R"
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='TEMPORARY' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='CURRENT'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
nb_words_total = len(self.value)
nb_words_report = int(nb_words_total / 10.0)
nb_words_done = 0
for word in self.value.keys():
self.put_word_into_db(word)
nb_words_done += 1
if nb_words_report != 0 and ((nb_words_done % nb_words_report) == 0):
write_message('......processed %d/%d words' % \
(nb_words_done, nb_words_total))
percentage_display = get_percentage_completed(nb_words_done, nb_words_total)
task_update_progress("(%s:%s) flushed %d/%d words %s" % \
(tab_name, self.index_name,
nb_words_done, nb_words_total,
percentage_display))
write_message('...updating %d words into %s ended' % \
(nb_words_total, tab_name))
write_message('...updating reverse table %s started' % tab_name)
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of updating wordTable into %s' % \
tab_name, verbose=9)
elif mode == "emergency":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of emergency flushing wordTable into %s' % \
tab_name, verbose=9)
write_message('...updating reverse table %s ended' % tab_name)
self.clean()
self.recIDs_in_mem = []
write_message("%s %s wordtable flush ended" % \
(self.table_name, mode))
task_update_progress("(%s:%s) flush ended" % \
(self.table_name, self.index_name))
def put_word_into_db(self, word):
"""Flush a single word to the database and delete it from memory"""
set = self.load_old_recIDs(word)
if set is not None: # merge the word recIDs found in memory:
hitlist_was_changed = self.merge_with_old_recIDs(word, set)
if not hitlist_was_changed:
# nothing to update:
write_message("......... unchanged hitlist for ``%s''" % \
word, verbose=9)
else:
# yes there were some new words:
write_message("......... updating hitlist for ``%s''" % \
word, verbose=9)
run_sql("UPDATE %s SET hitlist=%%s WHERE term=%%s" % wash_table_column_name(self.table_name), (set.fastdump(), word)) # kwalitee: disable=sql
else: # the word is new, will create new set:
write_message("......... inserting hitlist for ``%s''" % \
word, verbose=9)
set = intbitset(self.value[word].keys())
try:
run_sql("INSERT INTO %s (term, hitlist) VALUES (%%s, %%s)" % wash_table_column_name(self.table_name), (word, set.fastdump())) # kwalitee: disable=sql
except Exception, e:
## We send this exception to the admin only when we are not
## already repairing the problem.
register_exception(prefix="Error when putting the term '%s' into db (hitlist=%s): %s\n" % (repr(word), set, e), alert_admin=(task_get_option('cmd') != 'repair'))
if not set: # never store empty words
run_sql("DELETE FROM %s WHERE term=%%s" % wash_table_column_name(self.table_name), (word,)) # kwalitee: disable=sql
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
@param sign: sign of the word, 1 means keep this word in database,
-1 remove word from database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if value.has_key(word):
# the word 'word' exist already: update sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except Exception as e:
write_message(
"Error: Cannot put word %s with sign %d for recID %s (%s)."
% (word, sign, recID, e)
)
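# Illustrative sketch of the in-memory cache layout built by put(): self.value
# maps each washed term to a {recID: sign} dictionary, e.g. after
#     self.put(10, "ellis", 1)
#     self.put(12, "ellis", -1)
# the cache would contain {'ellis': {10: 1, 12: -1}}, which put_into_db()
# later merges into the forward table via merge_with_old_recIDs().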
def load_old_recIDs(self, word):
"""Load existing hitlist for the word from the database index files."""
query = "SELECT hitlist FROM %s WHERE term=%%s" % self.table_name
res = run_sql(query, (word, ))
if res:
return intbitset(res[0][0])
else:
return None
def merge_with_old_recIDs(self, word, set):
"""Merge the system numbers stored in memory
(hash of recIDs with value +1 or -1 according
to whether to add/delete them) with those stored
in the database index and received in set universe
of recIDs for the given word.
Return False in case no change was done to SET, return True in case SET
was changed.
"""
oldset = intbitset(set)
set.update_with_signs(self.value[word])
return set != oldset
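# Illustrative sketch (assumption: intbitset.update_with_signs(), used above,
# applies the {recID: +1/-1} dictionary by adding/removing the corresponding
# recIDs):
#     set = intbitset([10, 11])               # hitlist loaded from the database
#     self.value['ellis'] = {11: -1, 12: 1}   # pending changes from memory
#     self.merge_with_old_recIDs('ellis', set)
#         ->  True, with set now equal to intbitset([10, 12])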
def clean(self):
"Cleans the cache."
self.value = {}
class VirtualIndexTable(AbstractIndexTable):
"""
There are two types of indexes: virtual and regular/normal.
Check WordTable class for more on normal indexes.
This class represents a single index table for virtual index
(see also: AbstractIndexTable).
Virtual index doesn't store its own terms,
it accumulates terms from other indexes.
Good example of virtual index is the global index which stores
terms from title, abstract, keyword, author and so on.
This class contains methods for indexing virtual indexes.
See also: run_update()
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""
Creates VirtualIndexTable instance.
@param index_name: name of the index we want to reindex
@param table_type: words, pairs or phrases
@param table_prefix: add "tmp_" if you want to
reindex to temporary table
"""
AbstractIndexTable.__init__(self, index_name,
table_type,
table_prefix,
wash_index_terms)
self.mode = "normal"
self.dependent_indexes = dict(get_virtual_index_building_blocks(self.index_id))
def set_reindex_mode(self):
"""
Sets reindex mode. VirtualIndexTable will
remove all its content from database and
use insert_index function to repopulate it.
"""
self.mode = "reindex"
def run_update(self, flush=10000):
"""
Function starts all updating processes for virtual index.
It will take all information about pending changes from database
from queue tables (idxWORD/PAIR/PHRASExxQ), process them
and trigger appropriate indexing functions.
@param flush: how many records we will put in one go
into database (at most);
see also: opt_flush in WordTable class
"""
global chunksize
if self.mode == "reindex":
self.clean_database()
for index_id, index_name in self.dependent_indexes.iteritems():
rng = get_records_range_for_index(index_id)
flush_count = 0
if not rng:
continue
write_message('Virtual index: %s is being reindexed for %s index' % \
(self.index_name, index_name))
chunks = chunk_generator(rng)
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.insert_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
else:
for index_id, index_name in self.dependent_indexes.iteritems():
query = """SELECT id_bibrec_low, id_bibrec_high, mode FROM %s
WHERE index_name=%%s
ORDER BY runtime ASC""" % \
(self.table_name[:-1] + "Q")
entries = self.remove_duplicates(run_sql(query, (index_name, )))
if entries:
write_message('Virtual index: %s is being updated for %s index' % \
(self.index_name, index_name))
for entry in entries:
operation = None
recID_low, recID_high, mode = entry
if mode == CFG_BIBINDEX_UPDATE_MODE["Update"]:
operation = self.update_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Remove"]:
operation = self.remove_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Insert"]:
operation = self.insert_index
flush_count = 0
chunks = chunk_generator([recID_low, recID_high])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
operation(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
def retrieve_new_values_from_index(self, index_id, records_range):
"""
Retrieves new values from dependent index
for specific range of records.
@param index_id: id of the dependent index
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
tab_name = "idx" + self.table_type + ("%02d" % index_id) + "R"
query = """SELECT id_bibrec, termlist FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s""" % tab_name
new_regular_values = run_sql(query, (records_range[0], records_range[1]))
if new_regular_values:
zipped = zip(*new_regular_values)
new_regular_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
new_regular_values = dict()
return new_regular_values
def retrieve_old_values(self, records_range):
"""
Retrieves old values from database for this virtual index
for specific records range.
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
virtual_tab_name = self.table_name[:-1] + "R"
query = """SELECT id_bibrec, termlist FROM %s
WHERE type='CURRENT' AND
id_bibrec BETWEEN %%s AND %%s""" % virtual_tab_name
old_virtual_values = run_sql(query, (records_range[0], records_range[1]))
if old_virtual_values:
zipped = zip(*old_virtual_values)
old_virtual_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
old_virtual_values = dict()
return old_virtual_values
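# Illustrative sketch of the data shape returned by the two retrieve_* helpers
# above (hypothetical values): both return a dictionary mapping recIDs to their
# deserialized term lists, the virtual-index values carrying the prefix
# convention described in update_index() below:
#     retrieve_new_values_from_index(...)  ->  {10: ['ellis, j'], 11: ['smith, a']}
#     retrieve_old_values(...)             ->  {10: ['__author__ellis, j', '__title__higgs']}
# An empty dictionary is returned when the range contains no rows.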
def update_index(self, index_id, recID_low, recID_high):
"""
Updates the state of virtual index for records in range:
recID_low, recID_high for index specified by index_id.
Function stores terms in idxWORD/PAIR/PHRASExxR tables with
prefixes for specific index, for example term 'ellis'
from author index will be stored in reversed table as:
'__author__ellis'. It allows fast operations on only a part of the terms.
@param index_id: id of the dependent index we want to update
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
update_cache_for_record = self.update_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = update_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("""INSERT INTO %s (id_bibrec,termlist,type)
VALUES (%%s,%%s,'FUTURE')""" % \
wash_table_column_name(virtual_tab_name),
(recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def insert_index(self, index_id, recID_low, recID_high):
"""
Inserts terms from dependent index to virtual table
without looking what's inside the virtual table and
what terms are being added. It's faster than 'updating',
but it can only be used when virtual table is free of
terms from this dependent index.
@param index_id: id of the dependent index we want to insert terms from
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
insert_to_cache_for_record = self.insert_to_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = insert_to_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def remove_index(self, index_id, recID_low, recID_high):
"""
Removes words found in dependent index from reversed
table of virtual index. Updates the state of the memory
(for future removal from forward table).
Takes into account that given words can be found in more
than one dependent index and it won't mark these words
for the removal process.
@param index_id: id of the dependent index we want to remove
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
remove_from_cache_for_record = self.remove_from_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
old_values = old_virtual_values.get(recID) or []
to_serialize = remove_from_cache_for_record(index_name, recID, old_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def update_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates memory (cache) with information on what to
remove/add/modify in forward table for specified record.
It also returns new terms which should be indexed for given record.
@param index_name: index name of dependent index
@param recID: considered record
@param old_values: all old values from all dependent indexes
for this virtual index for recID
@param new_values: new values from some dependent index
which should be added
"""
prefix = make_prefix(index_name)
put = self.put
new_values_prefix = [prefix + term for term in new_values]
part_values = []
tmp_old_values_prefix = []
# split old values from v.index into those with 'prefix' and those without
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
part_values.append(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
tmp_old_values_prefix.append(term)
# remember not to remove words that occur more than once
part_values = set(part_values)
for value in tmp_old_values_prefix:
term_without_prefix = re.sub(re_prefix, '', value)
if term_without_prefix in part_values:
put(recID, term_without_prefix, 1)
for term_without_prefix in new_values:
put(recID, term_without_prefix, 1)
tmp_new_values_prefix = list(tmp_old_values_prefix)
tmp_new_values_prefix.extend(new_values_prefix)
return tmp_new_values_prefix
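# Illustrative sketch (hypothetical values, using the '__author__' prefix
# convention described in update_index() above and the make_prefix()/re_prefix
# helpers defined earlier in this module): for recID 10 with
#     old_values = ['__author__ellis', '__title__higgs']
#     new_values = ['smith']
# called with index_name='author', the method schedules 'ellis' for removal
# and 'smith' for addition in the forward-table cache, and returns
# ['__title__higgs', '__author__smith'] as the new reverse-table term list.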
def insert_to_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates cache with terms which should be inserted to database.
Used in insert_index function. See also: update_cache_for_record
which is analogous for update_index function.
"""
prefix = make_prefix(index_name)
append = old_values.append
put = self.put
for term in new_values:
append(prefix + term)
put(recID, term, 1)
return old_values
def remove_from_cache_for_record(self, index_name, recID, old_values):
"""
Updates information in cache with terms which should be removed
from virtual table. Used in remove_index function.
"""
prefix = make_prefix(index_name)
tmp_rest = []
tmp_removed = []
tmp_new_values = []
append_to_new = tmp_new_values.append
append_to_rest = tmp_rest.append
append_to_removed = tmp_removed.append
put = self.put
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
append_to_removed(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
append_to_rest(re.sub(re_prefix, '', term))
append_to_new(term)
to_remember = set(tmp_rest) & set(tmp_removed)
for term_without_prefix in to_remember:
put(recID, term_without_prefix, 1)
return tmp_new_values
def clean_database(self):
"""Removes all entries from corresponding tables in database"""
query = """DELETE FROM %s""" % self.table_name
run_sql(query)
query = """DELETE FROM %s""" % self.table_name[:-1] + "R"
run_sql(query)
def clean_queue_table(self, index_name):
"""
Cleans queue table (i.e. idxWORD/PAIR/PHRASExxQ)
for a specific index, i.e. it removes all entries
for this index from the queue table in the database.
"""
query = "DELETE FROM %s WHERE index_name='%s'" % \
(self.table_name[:-1].lstrip(self.table_prefix) + "Q",
index_name)
run_sql(query)
def remove_duplicates(self, entries):
"""
Removes duplicates from a list of entries (taken from Queue table)
in order to process a single command only once.
Queue table may look like this:
id (..) id_bibrec_low id_bibrec_high index_name mode
...
12 1 100 title update
13 1 100 title update
We don't want to perform the same operation twice. First we want to
squash the same commands into one.
@param entries: list of entries taken from the database
"""
unique = set()
return [entry for entry in entries if entry not in unique and not unique.add(entry)]
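# Illustrative usage sketch: duplicates are dropped while the original order
# is preserved (the trick relies on set.add() returning None):
#     remove_duplicates([(1, 100, 'update'), (1, 100, 'update'), (5, 9, 'insert')])
#         ->  [(1, 100, 'update'), (5, 9, 'insert')]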
def remove_dependent_index(self, index_name):
"""
Removes dependent index from this virtual index.
It means removing all words from all records with prefix:
__index_name__ from reversed table, and removing some of
them from forward table if they don't appear in another
dependent index.
@param index_name: name of the dependent index to remove
"""
flush = 10000
dependent = self.dependent_indexes.values()
if len(dependent) == 0:
write_message("Specified index is not virtual...")
return
if index_name not in dependent:
write_message("Dependent index already removed...")
return
index_id = get_index_id_from_index_name(index_name)
records_range = get_records_range_for_index(index_id)
write_message("Removing an index: %s" % index_name)
if records_range:
flush_count = 0
chunks = chunk_generator([records_range[0], records_range[1]])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.remove_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(chunk)
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
class WordTable(AbstractIndexTable):
"""
This class represents a single index table of a regular index
(regular means it doesn't accumulate data from other indexes,
but takes data directly from the metadata of the records which
are being indexed; for the other type of index see: VirtualIndexTable).
To start the indexing process one needs to invoke the add_recIDs() method.
For further reading see the description of this method.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""Creates words table instance.
@param index_name: the index name
@param table_type: type of the wordtable: Words, Pairs, Phrases
@param table_prefix: prefix for table name, indexing will be performed
on table: <<table_prefix>>idx<<wordtable_type>>XXF
@param wash_index_terms: do we wash index terms, and if yes (when >0),
how many characters do we keep in the index terms; see
max_char_length parameter of wash_index_term()
"""
AbstractIndexTable.__init__(self, index_name, table_type, table_prefix, wash_index_terms)
self.tags = get_index_tags(index_name, virtual=False)
self.nonmarc_tags = get_index_tags(index_name,
virtual=False,
tagtype="nonmarc")
self.timestamp = datetime.now()
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Update"]
try:
self.stemming_language = get_index_stemming_language(self.index_id)
except KeyError:
self.stemming_language = ''
self.remove_stopwords = get_index_remove_stopwords(self.index_id)
self.remove_html_markup = get_index_remove_html_markup(self.index_id)
self.remove_latex_markup = get_index_remove_latex_markup(self.index_id)
self.tokenizer = get_index_tokenizer(self.index_id)(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
self.tokenizer_type = detect_tokenizer_type(self.tokenizer)
self.default_tokenizer_function = self.tokenizer.get_tokenizing_function(table_type)
self.special_tags = self._handle_special_tags()
if self.stemming_language and self.table_name.startswith('idxWORD'):
write_message('%s has stemming enabled, language %s' % (self.table_name, self.stemming_language))
def _handle_special_tags(self):
"""
Fills in a dict with special tags which
always use the same tokenizer and this
tokenizer is independent of index.
"""
special_tags = {}
fields = self.tags + self.nonmarc_tags
for tag in fields:
if tag in CFG_BIBINDEX_SPECIAL_TAGS:
for t in CFG_BIBINDEX_INDEX_TABLE_TYPE:
if self.table_type == CFG_BIBINDEX_INDEX_TABLE_TYPE[t]:
tokenizer_name = CFG_BIBINDEX_SPECIAL_TAGS[tag][t]
tokenizer = _TOKENIZERS[tokenizer_name]
instance = tokenizer(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
special_tags[tag] = instance.get_tokenizing_function(self.table_type)
break
return special_tags
def turn_off_virtual_indexes(self):
"""
Prevents related virtual indexes from being reindexed.
"""
self.virtual_indexes = []
def turn_on_virtual_indexes(self):
"""
Turns on indexing related virtual indexes.
"""
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
def get_field(self, recID, tag):
"""Returns list of values of the MARC-21 'tag' fields for the
record 'recID'."""
out = []
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id
AND tag LIKE %%s""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag))
for row in res:
out.append(row[0])
return out
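# Illustrative sketch (hypothetical record content): the deletion check in
# add_recID_range() below relies on get_field() returning a plain list of the
# MARC field values, empty when the tag is absent, e.g.:
#     self.get_field(recID, '980__c')  ->  ['DELETED']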
def notify_virtual_indexes(self, recID_ranges):
"""
Informs all related virtual indexes about index change.
Function leaves information about the change for each index
in proper table in database (idxSOMETHINGxxQ).
@param recID_ranges: low and high recIDs of ranges
@type recID_ranges: list [[low_id1, high_id1], [low_id2, high_id2]...]
"""
query = """INSERT INTO %s (runtime, id_bibrec_low, id_bibrec_high, index_name, mode)
VALUES (%%s, %%s, %%s, %%s, %%s)"""
for index_id, index_name in self.virtual_indexes:
tab_name = "idx%s%02dQ" % (self.table_type, index_id)
full_query = query % tab_name
for recID_range in recID_ranges:
run_sql(full_query, (self.timestamp,
recID_range[0],
recID_range[1],
self.index_name,
self.virtual_index_update_mode))
def display(self):
"Displays the word table."
keys = self.value.keys()
keys.sort()
for k in keys:
write_message("%s: %s" % (k, self.value[k]))
def count(self):
"Returns the number of words in the table."
return len(self.value)
def info(self):
"Prints some information on the words table."
write_message("The words table contains %d words." % self.count())
def lookup_words(self, word=""):
"Lookup word from the words table."
if not word:
done = 0
while not done:
try:
word = raw_input("Enter word: ")
done = 1
except (EOFError, KeyboardInterrupt):
return
if self.value.has_key(word):
write_message("The word '%s' is found %d times." \
% (word, len(self.value[word])))
else:
write_message("The word '%s' does not exist in the word file."\
% word)
def add_recIDs(self, recIDs, opt_flush):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
global chunksize, _last_word_table
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
try:
self.chk_recID_range(i_low, i_high)
except StandardError:
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
raise
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if CFG_CHECK_MYSQL_THREADS:
kill_sleepy_mysql_threads()
percentage_display = get_percentage_completed(records_done, records_to_go)
task_update_progress("(%s:%s) adding recs %d-%d %s" % (self.table_name, self.index_name, i_low, i_high, percentage_display))
self.del_recID_range(i_low, i_high)
just_processed = self.add_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + just_processed
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db()
self.clean()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
write_message("%s backing up" % (self.table_name))
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
self.log_progress(time_started, records_done, records_to_go)
self.notify_virtual_indexes(recIDs)
def add_recID_range(self, recID1, recID2):
"""Add records from RECID1 to RECID2."""
wlist = {}
self.recIDs_in_mem.append([recID1, recID2])
# special case of author indexes where we also add author
# canonical IDs:
if self.index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
for recID in range(recID1, recID2 + 1):
if not wlist.has_key(recID):
wlist[recID] = []
wlist[recID] = list_union(get_author_canonical_ids_for_recid(recID),
wlist[recID])
marc, nonmarc = self.find_nonmarc_records(recID1, recID2)
if marc:
collector = TermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
wlist = collector.collect(marc, wlist)
if nonmarc:
collector = NonmarcTermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.nonmarc_tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
wlist = collector.collect(nonmarc, wlist)
# lookup index-time synonyms:
synonym_kbrs = get_all_synonym_knowledge_bases()
if synonym_kbrs.has_key(self.index_name):
if len(wlist) == 0: return 0
recIDs = wlist.keys()
for recID in recIDs:
for word in wlist[recID]:
word_synonyms = get_synonym_terms(word,
synonym_kbrs[self.index_name][0],
synonym_kbrs[self.index_name][1],
use_memoise=True)
if word_synonyms:
wlist[recID] = list_union(word_synonyms, wlist[recID])
# were there some words for these recIDs found?
recIDs = wlist.keys()
for recID in recIDs:
# was this record marked as deleted?
if "DELETED" in self.get_field(recID, "980__c"):
wlist[recID] = []
write_message("... record %d was declared deleted, removing its word list" % recID, verbose=9)
write_message("... record %d, termlist: %s" % (recID, wlist[recID]), verbose=9)
if len(wlist) == 0: return 0
# put words into reverse index table with FUTURE status:
for recID in recIDs:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal(wlist[recID]))) # kwalitee: disable=sql
# ... and, for new records, enter the CURRENT status as empty:
try:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
# okay, it's an already existing record, no problem
pass
# put words into memory word list:
put = self.put
for recID in recIDs:
for w in wlist[recID]:
put(recID, w, 1)
return len(recIDs)
def find_nonmarc_records(self, recID1, recID2):
"""Divides recID range into two different tables,
first one contains only recIDs of the records that
are Marc type and the second one contains records
of nonMarc type"""
marc = range(recID1, recID2 + 1)
nonmarc = []
query = """SELECT id FROM %s WHERE master_format <> 'marc'
AND id BETWEEN %%s AND %%s""" % "bibrec"
res = run_sql(query, (recID1, recID2))
if res:
nonmarc = list(zip(*res)[0])
if len(nonmarc) == (recID2 - recID1 + 1):
nonmarc = xrange(recID1, recID2 + 1)
marc = []
else:
for recID in nonmarc:
marc.remove(recID)
else:
marc = xrange(recID1, recID2 + 1)
return [marc, nonmarc]
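# Illustrative sketch of the return value (hypothetical values): a
# [marc, nonmarc] pair of recID sequences, e.g. for recID1=1, recID2=5 where
# only record 3 has a non-MARC master format:
#     self.find_nonmarc_records(1, 5)  ->  [[1, 2, 4, 5], [3]]
# When every record in the range is of one type, the other sequence is empty
# and the full range is returned as an xrange object.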
def log_progress(self, start, done, todo):
"""Calculate progress and store it.
start: start time,
done: records processed,
todo: total number of records"""
time_elapsed = time.time() - start
# consistency check
if time_elapsed == 0 or done > todo:
return
time_recs_per_min = done / (time_elapsed / 60.0)
write_message("%d records took %.1f seconds to complete.(%1.f recs/min)"\
% (done, time_elapsed, time_recs_per_min))
if time_recs_per_min:
write_message("Estimated runtime: %.1f minutes" % \
((todo - done) / time_recs_per_min))
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
@param sign: sign of the word, 1 means keep this word in database,
-1 remove word from database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if value.has_key(word):
# the word 'word' exist already: update sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except Exception as e:
write_message(
"Error: Cannot put word %s with sign %d for recID %s (%s)."
% (word, sign, recID, e)
)
def del_recIDs(self, recIDs):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
count = 0
for arange in recIDs:
task_sleep_now_if_required()
self.del_recID_range(arange[0], arange[1])
count = count + arange[1] - arange[0]
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Remove"]
self.put_into_db()
self.notify_virtual_indexes(recIDs)
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
def del_recID_range(self, low, high):
"""Deletes records with 'recID' system number between low
and high from memory words index table."""
write_message("%s fetching existing words for records #%d-#%d started" % \
(self.table_name, low, high), verbose=3)
self.recIDs_in_mem.append([low, high])
query = """SELECT id_bibrec,termlist FROM %sR as bb WHERE bb.id_bibrec
BETWEEN %%s AND %%s""" % (self.table_name[:-1])
recID_rows = run_sql(query, (low, high))
for recID_row in recID_rows:
recID = recID_row[0]
wlist = deserialize_via_marshal(recID_row[1])
for word in wlist:
self.put(recID, word, -1)
write_message("%s fetching existing words for records #%d-#%d ended" % \
(self.table_name, low, high), verbose=3)
def check_bad_words(self):
"""
Finds bad words in reverse tables. Returns True in case of bad words.
"""
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE') LIMIT 1""" \
% (self.table_name[:-1],)
res = run_sql(query)
return bool(res)
def report_on_table_consistency(self):
"""Check reverse words index tables (e.g. idxWORD01R) for
interesting states such as 'TEMPORARY' state.
Prints small report (no of words, no of bad words).
"""
# find number of words:
query = """SELECT COUNT(1) FROM %s""" % (self.table_name)
res = run_sql(query, None, 1)
if res:
nb_words = res[0][0]
else:
nb_words = 0
# report stats:
write_message("%s contains %d words" % (self.table_name, nb_words))
# find possible bad states in reverse tables:
if self.check_bad_words():
write_message("EMERGENCY: %s needs to be repaired" %
(self.table_name, ))
else:
write_message("%s is in consistent state" % (self.table_name))
def repair(self, opt_flush):
"""Repair the whole table"""
# find possible bad states in reverse tables:
if not self.check_bad_words():
return
query = """SELECT id_bibrec FROM %sR WHERE type IN ('TEMPORARY','FUTURE')""" \
% (self.table_name[:-1])
res = intbitset(run_sql(query))
recIDs = create_range_list(list(res))
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
self.fix_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + i_high - i_low + 1
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db("emergency")
self.clean()
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db("emergency")
self.log_progress(time_started, records_done, records_to_go)
write_message("%s inconsistencies repaired." % self.table_name)
def chk_recID_range(self, low, high):
"""Check if the reverse index table is in proper state"""
## check db
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE')
AND id_bibrec BETWEEN %%s AND %%s LIMIT 1""" % self.table_name[:-1]
res = run_sql(query, (low, high), 1)
if not res:
write_message("%s for %d-%d is in consistent state" % (self.table_name, low, high))
return # okay, words table is consistent
## inconsistency detected!
write_message("EMERGENCY: %s inconsistencies detected..." % self.table_name)
error_message = "Errors found. You should check consistency of the " \
"%s - %sR tables.\nRunning 'bibindex --repair' is " \
"recommended." % (self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def fix_recID_range(self, low, high):
"""Try to fix reverse index database consistency
(e.g. table idxWORD01R) in the low,high doc-id range.
Possible states for a recID follow:
CUR TMP FUT: very bad things have happened: warn!
CUR TMP : very bad things have happened: warn!
CUR FUT: delete FUT (crash before flushing)
CUR : database is ok
TMP FUT: add TMP to memory and del FUT from memory
flush (revert to old state)
TMP : very bad things have happened: warn!
FUT: very bad things have happened: warn!
"""
state = {}
query = "SELECT id_bibrec,type FROM %sR WHERE id_bibrec BETWEEN %%s AND %%s"\
% self.table_name[:-1]
res = run_sql(query, (low, high))
for row in res:
if not state.has_key(row[0]):
state[row[0]] = []
state[row[0]].append(row[1])
ok = 1 # will hold info on whether we will be able to repair
for recID in state.keys():
if not 'TEMPORARY' in state[recID]:
if 'FUTURE' in state[recID]:
if 'CURRENT' not in state[recID]:
write_message("EMERGENCY: Index record %d is in inconsistent state. Can't repair it." % recID)
ok = 0
else:
write_message("EMERGENCY: Inconsistency in index record %d detected" % recID)
query = """DELETE FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
run_sql(query, (recID,))
write_message("EMERGENCY: Inconsistency in record %d repaired." % recID)
else:
if 'FUTURE' in state[recID] and not 'CURRENT' in state[recID]:
self.recIDs_in_mem.append([recID, recID])
# Get the words file
query = """SELECT type,termlist FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
write_message(query, verbose=9)
res = run_sql(query, (recID,))
for row in res:
wlist = deserialize_via_marshal(row[1])
write_message("Words are %s " % wlist, verbose=9)
if row[0] == 'TEMPORARY':
sign = 1
else:
sign = -1
for word in wlist:
self.put(recID, word, sign)
else:
write_message("EMERGENCY: %s for %d is in inconsistent "
"state. Couldn't repair it." % (self.table_name,
recID), stream=sys.stderr)
ok = 0
if not ok:
error_message = "Unrepairable errors found. You should check " \
"consistency of the %s - %sR tables. Deleting affected " \
"TEMPORARY and FUTURE entries from these tables is " \
"recommended; see the BibIndex Admin Guide." % \
(self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def main():
"""Main that construct all the bibtask."""
task_init(authorization_action='runbibindex',
authorization_msg="BibIndex Task Submission",
description="""Examples:
\t%s -a -i 234-250,293,300-500 -u admin@localhost
\t%s -a -w author,fulltext -M 8192 -v3
\t%s -d -m +4d -A on --flush=10000\n""" % ((sys.argv[0],) * 3), help_specific_usage=""" Indexing options:
-a, --add\t\tadd or update words for selected records
-d, --del\t\tdelete words for selected records
-i, --id=low[-high]\t\tselect according to doc recID
-m, --modified=from[,to]\tselect according to modification date
-c, --collection=c1[,c2]\tselect according to collection
-R, --reindex\treindex the selected indexes from scratch
Repairing options:
-k, --check\t\tcheck consistency for all records in the table(s)
-r, --repair\t\ttry to repair all records in the table(s)
Specific options:
-w, --windex=w1[,w2]\tword/phrase indexes to consider (all)
-M, --maxmem=XXX\tmaximum memory usage in kB (no limit)
-f, --flush=NNN\t\tfull consistent table flush after NNN records (10000)
--force\t\tforce indexing of all records for provided indexes
-Z, --remove-dependent-index=w\tname of a dependent index to remove from the virtual index(es)
-l, --all-virtual\t\tconsider all virtual indexes; the same as: -w virtual_ind1,virtual_ind2,...
""",
version=__revision__,
specific_params=("adi:m:c:w:krRM:f:oZ:l", [
"add",
"del",
"id=",
"modified=",
"collection=",
"windex=",
"check",
"repair",
"reindex",
"maxmem=",
"flush=",
"force",
"remove-dependent-index=",
"all-virtual"
]),
task_stop_helper_fnc=task_stop_table_close_fnc,
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core,
task_submit_check_options_fnc=task_submit_check_options)
def task_submit_check_options():
"""Check for options compatibility."""
if task_get_option("reindex"):
if task_get_option("cmd") != "add" or task_get_option('id') or task_get_option('collection'):
print >> sys.stderr, "ERROR: You can use --reindex only when adding modified record."
return False
return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
self.options['number'] = value
return True
return False
"""
if key in ("-a", "--add"):
task_set_option("cmd", "add")
if ("-x", "") in opts or ("--del", "") in opts:
raise StandardError("Can not have --add and --del at the same time!")
elif key in ("-k", "--check"):
task_set_option("cmd", "check")
elif key in ("-r", "--repair"):
task_set_option("cmd", "repair")
elif key in ("-d", "--del"):
task_set_option("cmd", "del")
elif key in ("-i", "--id"):
task_set_option('id', task_get_option('id') + split_ranges(value))
elif key in ("-m", "--modified"):
task_set_option("modified", get_date_range(value))
elif key in ("-c", "--collection"):
task_set_option("collection", value)
elif key in ("-R", "--reindex"):
task_set_option("reindex", True)
elif key in ("-w", "--windex"):
task_set_option("windex", value)
elif key in ("-M", "--maxmem"):
task_set_option("maxmem", int(value))
if task_get_option("maxmem") < base_process_size + 1000:
raise StandardError("Memory usage should be higher than %d kB" % \
(base_process_size + 1000))
elif key in ("-f", "--flush"):
task_set_option("flush", int(value))
elif key in ("-o", "--force"):
task_set_option("force", True)
elif key in ("-Z", "--remove-dependent-index",):
task_set_option("remove-dependent-index", value)
elif key in ("-l", "--all-virtual",):
task_set_option("all-virtual", True)
else:
return False
return True
def task_stop_table_close_fnc():
""" Close tables to STOP. """
global _last_word_table
if _last_word_table:
_last_word_table.put_into_db()
def get_recIDs_by_date_bibliographic(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
@param dates: (date_low, date_high) tuple, or None to use the index's last update
@param index_name: name of the index to consider
@param force_all: if True, consider all records regardless of dates
"""
index_id = get_index_id_from_index_name(index_name)
if not dates:
query = """SELECT last_updated FROM idxINDEX WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
if dates[1] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date >= %s""",
(dates[0],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s
AND status<>'DELETED'""",
(dates[0],)))
elif dates[0] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date <= %s""",
(dates[1],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date
AND modification_date <= %s
AND status<>'DELETED'""",
(dates[1],)))
else:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b
WHERE b.modification_date >= %s AND
b.modification_date <= %s""",
(dates[0], dates[1])))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s AND
modification_date <= %s AND
status<>'DELETED'""",
(dates[0], dates[1],)))
# special case of author indexes where we need to re-index
# those records that were affected by changed BibAuthorID attributions:
if index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
from invenio.bibauthorid_personid_maintenance import get_recids_affected_since
# dates[1] is ignored, since BibAuthorID API does not offer upper limit search
rec_list_author = get_recids_affected_since(dates[0], dates[1])
res = res | rec_list_author
return set(res)
def get_recIDs_by_date_authority(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
Searches for bibliographic records connected to authority records
that have been changed.
"""
index_id = get_index_id_from_index_name(index_name)
index_tags = get_index_tags(index_name)
if not dates:
query = """SELECT last_updated FROM idxINDEX WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
res = intbitset()
for tag in index_tags:
pattern = tag.replace('%', '*')
matches = fnmatch.filter(CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.keys(), pattern)
if not len(matches):
continue
for tag_match in matches:
# get the type of authority record associated with this field
auth_type = CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.get(tag_match)
# find updated authority records of this type
# dates[1] is ignored, needs dates[0] to find res
now = datetime.now()
auth_recIDs = search_pattern(p='980__a:' + auth_type) \
& search_unit_in_bibrec(str(dates[0]), str(now), search_type='m')
# now find dependent bibliographic records
for auth_recID in auth_recIDs:
# get the fix authority identifier of this authority record
control_nos = get_control_nos_from_recID(auth_recID)
# there may be multiple control number entries! (the '035' field is repeatable!)
for control_no in control_nos:
# get the bibrec IDs that refer to AUTHORITY_ID in TAG
tag_0 = tag_match[:5] + '0' # possibly do the same for '4' subfields ?
fieldvalue = '"' + control_no + '"'
res |= search_pattern(p=tag_0 + ':' + fieldvalue)
return set(res)
def get_not_updated_recIDs(modified_dates, indexes, force_all=False):
"""Finds not updated recIDs in database for indexes.
@param modified_dates: between these dates we should look for modified records
@type modified_dates: [date_old, date_new]
@param indexes: list of indexes
@type indexes: list of index names
@param force_all: if True all records will be taken
"""
found_recIDs = set()
write_message(CFG_BIBINDEX_UPDATE_MESSAGE)
for index in indexes:
found_recIDs |= get_recIDs_by_date_bibliographic(modified_dates, index, force_all)
found_recIDs |= get_recIDs_by_date_authority(modified_dates, index, force_all)
return list(sorted(found_recIDs))
def get_recIDs_from_cli(indexes=[]):
"""
Gets recIDs ranges from CLI for indexing when
user specified 'id' or 'collection' option or
search for modified recIDs for provided indexes
when recIDs are not specified.
@param indexes: it's a list of specified indexes, which
can be obtained from CLI with use of:
get_indexes_from_cli() function.
@type indexes: list of strings
"""
# need to first update idxINDEX table to find proper recIDs for reindexing
if task_get_option("reindex"):
for index_name in indexes:
run_sql("""UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00'
WHERE name=%s""", (index_name,))
if task_get_option("id"):
return task_get_option("id")
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID, recID])
return recIDs_range
elif task_get_option("cmd") == "add":
recs = get_not_updated_recIDs(task_get_option("modified"),
indexes,
task_get_option("force"))
recIDs_range = beautify_range_list(create_range_list(recs))
return recIDs_range
return []
def get_indexes_from_cli():
"""
Gets indexes from CLI and checks if they are
valid. If no indexes were specified, the function
will return all known indexes.
"""
indexes = task_get_option("windex")
all_virtual = task_get_option("all-virtual")
if all_virtual:
indexes = filter_for_virtual_indexes(get_all_indexes())
elif not indexes:
indexes = get_all_indexes()
else:
indexes = indexes.split(",")
indexes = remove_inexistent_indexes(indexes, leave_virtual=True)
return indexes
def remove_dependent_index(virtual_indexes, dependent_index):
"""
Removes dependent index from virtual indexes.
@param virtual_indexes: names of virtual_indexes
@type virtual_indexes: list of strings
@param dependent_index: name of dependent index
@type dependent_index: string
"""
if not virtual_indexes:
write_message("You should specify a name of a virtual index...")
return
id_dependent = get_index_id_from_index_name(dependent_index)
for index_name in virtual_indexes:
index_id = get_index_id_from_index_name(index_name)
for type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.itervalues():
vit = VirtualIndexTable(index_name, type_)
vit.remove_dependent_index(dependent_index)
task_sleep_now_if_required()
query = """DELETE FROM idxINDEX_idxINDEX WHERE id_virtual=%s AND id_normal=%s"""
run_sql(query, (index_id, id_dependent))
def should_update_virtual_indexes():
"""
Decides if any virtual indexes should be updated.
Decision is made based on arguments obtained
from CLI.
"""
return task_get_option("all-virtual") or task_get_option("windex")
def update_virtual_indexes(virtual_indexes, reindex=False):
"""
Function will update all specified virtual_indexes.
@param virtual_indexes: list of index names
@param reindex: shall we reindex given v.indexes from scratch?
"""
kwargs = {}
if reindex:
kwargs.update({'table_prefix': 'tmp_'})
for index_name in virtual_indexes:
if reindex:
index_id = get_index_id_from_index_name(index_name)
init_temporary_reindex_tables(index_id)
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.set_reindex_mode()
vit.run_update()
swap_temporary_reindex_tables(index_id)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
else:
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.run_update()
task_sleep_now_if_required(can_stop_too=True)
def task_run_core():
"""Runs the task by fetching arguments from the BibSched task queue.
This is what BibSched will be invoking via daemon call.
"""
global _last_word_table
indexes = get_indexes_from_cli()
if len(indexes) == 0:
write_message("Specified indexes can't be found.")
return True
virtual_indexes = filter_for_virtual_indexes(indexes)
regular_indexes = list(set(indexes) - set(virtual_indexes))
# check tables consistency
if task_get_option("cmd") == "check":
for index_name in indexes:
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
_last_word_table = None
return True
# virtual index: remove dependent index
if task_get_option("remove-dependent-index"):
remove_dependent_index(indexes,
task_get_option("remove-dependent-index"))
return True
# virtual index: update
if should_update_virtual_indexes():
update_virtual_indexes(virtual_indexes, task_get_option("reindex"))
if len(regular_indexes) == 0:
return True
# regular index: initialization for Words,Pairs,Phrases
recIDs_range = get_recIDs_from_cli(regular_indexes)
# FIXME: restore when the hstRECORD table race condition between
# bibupload and bibindex is solved
# recIDs_for_index = find_affected_records_for_index(regular_indexes,
# recIDs_range,
# (task_get_option("force") or \
# task_get_option("reindex") or \
# task_get_option("cmd") == "del"))
recIDs_for_index = find_affected_records_for_index(regular_indexes,
recIDs_range,
True)
if len(recIDs_for_index.keys()) == 0:
write_message("Selected indexes/recIDs are up to date.")
# Let's work on single words!
for index_name in recIDs_for_index.keys():
index_id = get_index_id_from_index_name(index_name)
reindex_prefix = ""
if task_get_option("reindex"):
reindex_prefix = "tmp_"
init_temporary_reindex_tables(index_id, reindex_prefix)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
table_prefix=reindex_prefix,
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception(alert_admin=True)
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on pairs now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
table_prefix=reindex_prefix,
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on phrases now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
table_prefix=reindex_prefix,
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
if not task_get_option("id") and not task_get_option("collection"):
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
if task_get_option("reindex"):
swap_temporary_reindex_tables(index_id, reindex_prefix)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
# update modification date also for indexes that were up to date
if not task_get_option("id") and not task_get_option("collection") and \
task_get_option("cmd") == "add":
up_to_date = set(indexes) - set(recIDs_for_index.keys())
update_index_last_updated(list(up_to_date), task_get_task_param('task_starting_time'))
_last_word_table = None
return True
### okay, here we go:
if __name__ == '__main__':
main()
|
gpl-2.0
| 2,835,064,578,374,895,600 | 3,060,760,496,212,097,500 | 43.014795 | 207 | 0.561683 | false |
google/mobly
|
mobly/controllers/android_device.py
|
1
|
38734
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import os
import re
import shutil
import time
from mobly import logger as mobly_logger
from mobly import runtime_test_info
from mobly import utils
from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib import errors
from mobly.controllers.android_device_lib import fastboot
from mobly.controllers.android_device_lib import service_manager
from mobly.controllers.android_device_lib.services import logcat
from mobly.controllers.android_device_lib.services import snippet_management_service
# Convenience constant for the package of Mobly Bundled Snippets
# (http://github.com/google/mobly-bundled-snippets).
MBS_PACKAGE = 'com.google.android.mobly.snippet.bundled'
MOBLY_CONTROLLER_CONFIG_NAME = 'AndroidDevice'
ANDROID_DEVICE_PICK_ALL_TOKEN = '*'
_DEBUG_PREFIX_TEMPLATE = '[AndroidDevice|%s] %s'
# Key name for adb logcat extra params in config file.
ANDROID_DEVICE_ADB_LOGCAT_PARAM_KEY = 'adb_logcat_param'
ANDROID_DEVICE_EMPTY_CONFIG_MSG = 'Configuration is empty, abort!'
ANDROID_DEVICE_NOT_LIST_CONFIG_MSG = 'Configuration should be a list, abort!'
# System properties that are cached by the `AndroidDevice.build_info` property.
# The only properties on this list should be read-only system properties.
CACHED_SYSTEM_PROPS = [
'ro.build.id',
'ro.build.type',
'ro.build.version.codename',
'ro.build.version.sdk',
'ro.build.product',
'ro.build.characteristics',
'ro.debuggable',
'ro.product.name',
'ro.hardware',
]
# Keys for attributes in configs that alternate the controller module behavior.
# If this is False for a device, errors from that device will be ignored
# during `create`. Default is True.
KEY_DEVICE_REQUIRED = 'required'
DEFAULT_VALUE_DEVICE_REQUIRED = True
# If True, logcat collection will not be started during `create`.
# Default is False.
KEY_SKIP_LOGCAT = 'skip_logcat'
DEFAULT_VALUE_SKIP_LOGCAT = False
SERVICE_NAME_LOGCAT = 'logcat'
# Default name for bug reports taken without a specified test name.
DEFAULT_BUG_REPORT_NAME = 'bugreport'
# Default Timeout to wait for boot completion
DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND = 15 * 60
# Timeout for the adb command for taking a screenshot
TAKE_SCREENSHOT_TIMEOUT_SECOND = 10
# Aliases of error types for backward compatibility.
Error = errors.Error
DeviceError = errors.DeviceError
SnippetError = snippet_management_service.Error
# Regex to heuristically determine if the device is an emulator.
EMULATOR_SERIAL_REGEX = re.compile(r'emulator-\d+')
def create(configs):
"""Creates AndroidDevice controller objects.
Args:
configs: A list of dicts, each representing a configuration for an
Android device.
Returns:
A list of AndroidDevice objects.
"""
if not configs:
raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)
elif configs == ANDROID_DEVICE_PICK_ALL_TOKEN:
ads = get_all_instances()
elif not isinstance(configs, list):
raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)
elif isinstance(configs[0], dict):
# Configs is a list of dicts.
ads = get_instances_with_configs(configs)
elif isinstance(configs[0], str):
# Configs is a list of strings representing serials.
ads = get_instances(configs)
else:
raise Error('No valid config found in: %s' % configs)
_start_services_on_ads(ads)
return ads
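# Hedged usage sketch, not part of the original module: the config shapes
# accepted by create() above, shown as plain data. The serials are
# hypothetical; actually calling create() requires adb-visible devices.
_EXAMPLE_CONFIGS_AS_SERIALS = ['emulator-5554', '0123456789ABCDEF']
_EXAMPLE_CONFIGS_AS_DICTS = [
    {'serial': 'emulator-5554', 'label': 'caller'},
    {'serial': '0123456789ABCDEF', 'label': 'callee', 'required': False},
]
_EXAMPLE_CONFIGS_PICK_ALL = ANDROID_DEVICE_PICK_ALL_TOKEN  # i.e. '*'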
def destroy(ads):
"""Cleans up AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects.
"""
for ad in ads:
try:
ad.services.stop_all()
except Exception:
ad.log.exception('Failed to clean up properly.')
def get_info(ads):
"""Get information on a list of AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects.
Returns:
A list of dict, each representing info for an AndroidDevice objects.
"""
return [ad.device_info for ad in ads]
def _validate_device_existence(serials):
"""Validate that all the devices specified by the configs can be reached.
Args:
serials: list of strings, the serials of all the devices that are expected
to exist.
"""
valid_ad_identifiers = list_adb_devices() + list_adb_devices_by_usb_id()
for serial in serials:
if serial not in valid_ad_identifiers:
raise Error(f'Android device serial "{serial}" is specified in '
'config but is not reachable.')
def _start_services_on_ads(ads):
"""Starts long running services on multiple AndroidDevice objects.
If any one AndroidDevice object fails to start services, cleans up all
AndroidDevice objects and their services.
Args:
ads: A list of AndroidDevice objects whose services to start.
"""
for ad in ads:
start_logcat = not getattr(ad, KEY_SKIP_LOGCAT, DEFAULT_VALUE_SKIP_LOGCAT)
try:
if start_logcat:
ad.services.logcat.start()
except Exception:
is_required = getattr(ad, KEY_DEVICE_REQUIRED,
DEFAULT_VALUE_DEVICE_REQUIRED)
if is_required:
ad.log.exception('Failed to start some services, abort!')
destroy(ads)
raise
else:
ad.log.exception('Skipping this optional device because some '
'services failed to start.')
def parse_device_list(device_list_str, key):
"""Parses a byte string representing a list of devices.
  The string is generated by calling either adb or fastboot. The tokens in
  each line are tab-separated.
Args:
device_list_str: Output of adb or fastboot.
key: The token that signifies a device in device_list_str.
Returns:
A list of android device serial numbers.
"""
try:
clean_lines = str(device_list_str, 'utf-8').strip().split('\n')
except UnicodeDecodeError:
logging.warning("unicode decode error, origin str: %s", device_list_str)
raise
results = []
for line in clean_lines:
tokens = line.strip().split('\t')
if len(tokens) == 2 and tokens[1] == key:
results.append(tokens[0])
return results
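# Hedged illustration, not part of the original module: parse_device_list()
# applied to a made-up `adb devices` payload; the serial is hypothetical.
_EXAMPLE_ADB_DEVICES_OUTPUT = b'List of devices attached\nemulator-5554\tdevice\n'
# parse_device_list(_EXAMPLE_ADB_DEVICES_OUTPUT, 'device') -> ['emulator-5554']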
def list_adb_devices():
"""List all android devices connected to the computer that are detected by
adb.
Returns:
A list of android device serials. Empty if there's none.
"""
out = adb.AdbProxy().devices()
return parse_device_list(out, 'device')
def list_adb_devices_by_usb_id():
"""List the usb id of all android devices connected to the computer that
are detected by adb.
Returns:
A list of strings that are android device usb ids. Empty if there's
none.
"""
out = adb.AdbProxy().devices(['-l'])
clean_lines = str(out, 'utf-8').strip().split('\n')
results = []
for line in clean_lines:
tokens = line.strip().split()
if len(tokens) > 2 and tokens[1] == 'device':
results.append(tokens[2])
return results
def list_fastboot_devices():
"""List all android devices connected to the computer that are in in
fastboot mode. These are detected by fastboot.
Returns:
A list of android device serials. Empty if there's none.
"""
out = fastboot.FastbootProxy().devices()
return parse_device_list(out, 'fastboot')
def get_instances(serials):
"""Create AndroidDevice instances from a list of serials.
Args:
serials: A list of android device serials.
Returns:
A list of AndroidDevice objects.
"""
_validate_device_existence(serials)
results = []
for s in serials:
results.append(AndroidDevice(s))
return results
def get_instances_with_configs(configs):
"""Create AndroidDevice instances from a list of dict configs.
Each config should have the required key-value pair 'serial'.
Args:
configs: A list of dicts each representing the configuration of one
android device.
Returns:
A list of AndroidDevice objects.
"""
# First make sure each config contains a serial, and all the serials'
# corresponding devices exist.
serials = []
for c in configs:
try:
serials.append(c['serial'])
except KeyError:
raise Error(
'Required value "serial" is missing in AndroidDevice config %s.' % c)
_validate_device_existence(serials)
results = []
for c in configs:
serial = c.pop('serial')
is_required = c.get(KEY_DEVICE_REQUIRED, True)
try:
ad = AndroidDevice(serial)
ad.load_config(c)
except Exception:
if is_required:
raise
ad.log.exception('Skipping this optional device due to error.')
continue
results.append(ad)
return results
def get_all_instances(include_fastboot=False):
"""Create AndroidDevice instances for all attached android devices.
Args:
include_fastboot: Whether to include devices in bootloader mode or not.
Returns:
A list of AndroidDevice objects each representing an android device
attached to the computer.
"""
if include_fastboot:
serial_list = list_adb_devices() + list_fastboot_devices()
return get_instances(serial_list)
return get_instances(list_adb_devices())
def filter_devices(ads, func):
"""Finds the AndroidDevice instances from a list that match certain
conditions.
Args:
ads: A list of AndroidDevice instances.
func: A function that takes an AndroidDevice object and returns True
if the device satisfies the filter condition.
Returns:
A list of AndroidDevice instances that satisfy the filter condition.
"""
results = []
for ad in ads:
if func(ad):
results.append(ad)
return results
def get_devices(ads, **kwargs):
"""Finds a list of AndroidDevice instance from a list that has specific
attributes of certain values.
Example:
get_devices(android_devices, label='foo', phone_number='1234567890')
get_devices(android_devices, model='angler')
Args:
ads: A list of AndroidDevice instances.
kwargs: keyword arguments used to filter AndroidDevice instances.
Returns:
A list of target AndroidDevice instances.
Raises:
Error: No devices are matched.
"""
def _get_device_filter(ad):
for k, v in kwargs.items():
if not hasattr(ad, k):
return False
elif getattr(ad, k) != v:
return False
return True
filtered = filter_devices(ads, _get_device_filter)
if not filtered:
raise Error('Could not find a target device that matches condition: %s.' %
kwargs)
else:
return filtered
def get_device(ads, **kwargs):
"""Finds a unique AndroidDevice instance from a list that has specific
attributes of certain values.
Example:
get_device(android_devices, label='foo', phone_number='1234567890')
get_device(android_devices, model='angler')
Args:
ads: A list of AndroidDevice instances.
kwargs: keyword arguments used to filter AndroidDevice instances.
Returns:
The target AndroidDevice instance.
Raises:
Error: None or more than one device is matched.
"""
filtered = get_devices(ads, **kwargs)
if len(filtered) == 1:
return filtered[0]
else:
serials = [ad.serial for ad in filtered]
raise Error('More than one device matched: %s' % serials)
def take_bug_reports(ads, test_name=None, begin_time=None, destination=None):
"""Takes bug reports on a list of android devices.
If you want to take a bug report, call this function with a list of
  android_device objects in on_fail. Bug reports will be taken on all the
  devices in the list concurrently. A bug report takes a relatively long
  time to take, so use this cautiously.
Args:
ads: A list of AndroidDevice instances.
test_name: Name of the test method that triggered this bug report.
If None, the default name "bugreport" will be used.
begin_time: timestamp taken when the test started, can be either
string or int. If None, the current time will be used.
destination: string, path to the directory where the bugreport
should be saved.
"""
if begin_time is None:
begin_time = mobly_logger.get_log_file_timestamp()
else:
begin_time = mobly_logger.sanitize_filename(str(begin_time))
def take_br(test_name, begin_time, ad, destination):
ad.take_bug_report(test_name=test_name,
begin_time=begin_time,
destination=destination)
args = [(test_name, begin_time, ad, destination) for ad in ads]
utils.concurrent_exec(take_br, args)
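# Hedged usage sketch, not part of the original module: a typical on_fail
# hook would call take_bug_reports() roughly like the helper below; the
# destination path and test name are hypothetical.
def _example_take_bug_reports(ads):
  take_bug_reports(ads,
                   test_name='test_call',
                   destination='/tmp/bugreports')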
class AndroidDevice:
"""Class representing an android device.
Each object of this class represents one Android device in Mobly. This class
provides various ways, like adb, fastboot, and Mobly snippets, to control
an Android device, whether it's a real device or an emulator instance.
You can also register your own services to the device's service manager.
See the docs of `service_manager` and `base_service` for details.
Attributes:
    serial: A string that's the serial number of the Android device.
log_path: A string that is the path where all logs collected on this
android device should be stored.
log: A logger adapted from root logger with an added prefix specific
to an AndroidDevice instance. The default prefix is
[AndroidDevice|<serial>]. Use self.debug_tag = 'tag' to use a
different tag in the prefix.
adb_logcat_file_path: A string that's the full path to the adb logcat
file collected, if any.
adb: An AdbProxy object used for interacting with the device via adb.
fastboot: A FastbootProxy object used for interacting with the device
via fastboot.
services: ServiceManager, the manager of long-running services on the
device.
"""
def __init__(self, serial=''):
self._serial = str(serial)
    # logging.log_path only exists when this is used in a Mobly test run.
self._log_path_base = getattr(logging, 'log_path', '/tmp/logs')
self._log_path = os.path.join(self._log_path_base,
'AndroidDevice%s' % self._normalized_serial)
self._debug_tag = self._serial
self.log = AndroidDeviceLoggerAdapter(logging.getLogger(),
{'tag': self.debug_tag})
self._build_info = None
self._is_rebooting = False
self.adb = adb.AdbProxy(serial)
self.fastboot = fastboot.FastbootProxy(serial)
if self.is_rootable:
self.root_adb()
self.services = service_manager.ServiceManager(self)
self.services.register(SERVICE_NAME_LOGCAT,
logcat.Logcat,
start_service=False)
self.services.register('snippets',
snippet_management_service.SnippetManagementService)
# Device info cache.
self._user_added_device_info = {}
def __repr__(self):
return '<AndroidDevice|%s>' % self.debug_tag
@property
def adb_logcat_file_path(self):
if self.services.has_service_by_name(SERVICE_NAME_LOGCAT):
return self.services.logcat.adb_logcat_file_path
@property
def _normalized_serial(self):
"""Normalized serial name for usage in log filename.
    Some Android emulators use ip:port as their serial names; since `:` is
    not valid in filenames on Windows, the serial should be sanitized first.
"""
if self._serial is None:
return None
return mobly_logger.sanitize_filename(self._serial)
@property
def device_info(self):
"""Information to be pulled into controller info.
The latest serial, model, and build_info are included. Additional info
can be added via `add_device_info`.
"""
info = {
'serial': self.serial,
'model': self.model,
'build_info': self.build_info,
'user_added_info': self._user_added_device_info
}
return info
def add_device_info(self, name, info):
"""Add information of the device to be pulled into controller info.
Adding the same info name the second time will override existing info.
Args:
name: string, name of this info.
info: serializable, content of the info.
"""
self._user_added_device_info.update({name: info})
@property
def sl4a(self):
"""Attribute for direct access of sl4a client.
Not recommended. This is here for backward compatibility reasons.
Preferred: directly access `ad.services.sl4a`.
"""
if self.services.has_service_by_name('sl4a'):
return self.services.sl4a
@property
def ed(self):
"""Attribute for direct access of sl4a's event dispatcher.
Not recommended. This is here for backward compatibility reasons.
Preferred: directly access `ad.services.sl4a.ed`.
"""
if self.services.has_service_by_name('sl4a'):
return self.services.sl4a.ed
@property
def debug_tag(self):
"""A string that represents a device object in debug info. Default value
is the device serial.
This will be used as part of the prefix of debugging messages emitted by
this device object, like log lines and the message of DeviceError.
"""
return self._debug_tag
@debug_tag.setter
def debug_tag(self, tag):
"""Setter for the debug tag.
By default, the tag is the serial of the device, but sometimes it may
be more descriptive to use a different tag of the user's choice.
Changing debug tag changes part of the prefix of debug info emitted by
this object, like log lines and the message of DeviceError.
Example:
By default, the device's serial number is used:
'INFO [AndroidDevice|abcdefg12345] One pending call ringing.'
The tag can be customized with `ad.debug_tag = 'Caller'`:
'INFO [AndroidDevice|Caller] One pending call ringing.'
"""
self.log.info('Logging debug tag set to "%s"', tag)
self._debug_tag = tag
self.log.extra['tag'] = tag
@property
def has_active_service(self):
"""True if any service is running on the device.
A service can be a snippet or logcat collection.
"""
return self.services.is_any_alive
@property
def log_path(self):
"""A string that is the path for all logs collected from this device.
"""
return self._log_path
@log_path.setter
def log_path(self, new_path):
"""Setter for `log_path`, use with caution."""
if self.has_active_service:
raise DeviceError(
self, 'Cannot change `log_path` when there is service running.')
old_path = self._log_path
if new_path == old_path:
return
    if os.path.exists(new_path) and os.listdir(new_path):
raise DeviceError(self,
'Logs already exist at %s, cannot override.' % new_path)
if os.path.exists(old_path):
# Remove new path so copytree doesn't complain.
shutil.rmtree(new_path, ignore_errors=True)
shutil.copytree(old_path, new_path)
shutil.rmtree(old_path, ignore_errors=True)
self._log_path = new_path
@property
def serial(self):
"""The serial number used to identify a device.
This is essentially the value used for adb's `-s` arg, which means it
can be a network address or USB bus number.
"""
return self._serial
def update_serial(self, new_serial):
"""Updates the serial number of a device.
The "serial number" used with adb's `-s` arg is not necessarily the
actual serial number. For remote devices, it could be a combination of
host names and port numbers.
This is used for when such identifier of remote devices changes during
a test. For example, when a remote device reboots, it may come back
with a different serial number.
This is NOT meant for switching the object to represent another device.
We intentionally did not make it a regular setter of the serial
property so people don't accidentally call this without understanding
the consequences.
Args:
new_serial: string, the new serial number for the same device.
Raises:
DeviceError: tries to update serial when any service is running.
"""
new_serial = str(new_serial)
if self.has_active_service:
raise DeviceError(
self,
'Cannot change device serial number when there is service running.')
if self._debug_tag == self.serial:
self._debug_tag = new_serial
self._serial = new_serial
self.adb.serial = new_serial
self.fastboot.serial = new_serial
@contextlib.contextmanager
def handle_reboot(self):
"""Properly manage the service life cycle when the device needs to
temporarily disconnect.
The device can temporarily lose adb connection due to user-triggered
reboot. Use this function to make sure the services
started by Mobly are properly stopped and restored afterwards.
For sample usage, see self.reboot().
"""
live_service_names = self.services.list_live_services()
self.services.stop_all()
# On rooted devices, system properties may change on reboot, so disable
# the `build_info` cache by setting `_is_rebooting` to True and
# repopulate it after reboot.
# Note, this logic assumes that instance variable assignment in Python
# is atomic; otherwise, `threading` data structures would be necessary.
# Additionally, nesting calls to `handle_reboot` while changing the
# read-only property values during reboot will result in stale values.
self._is_rebooting = True
try:
yield
finally:
self.wait_for_boot_completion()
# On boot completion, invalidate the `build_info` cache since any
# value it had from before boot completion is potentially invalid.
# If the value gets set after the final invalidation and before
      # setting `_is_rebooting` to True, then that's okay because the
# device has finished rebooting at that point, and values at that
# point should be valid.
# If the reboot fails for some reason, then `_is_rebooting` is never
# set to False, which means the `build_info` cache remains disabled
# until the next reboot. This is relatively okay because the
      # `build_info` cache only minimizes adb commands.
self._build_info = None
self._is_rebooting = False
if self.is_rootable:
self.root_adb()
self.services.start_services(live_service_names)
@contextlib.contextmanager
def handle_usb_disconnect(self):
"""Properly manage the service life cycle when USB is disconnected.
The device can temporarily lose adb connection due to user-triggered
USB disconnection, e.g. the following cases can be handled by this
method:
* Power measurement: Using Monsoon device to measure battery consumption
would potentially disconnect USB.
* Unplug USB so device loses connection.
* ADB connection over WiFi and WiFi got disconnected.
* Any other type of USB disconnection, as long as snippet session can
be kept alive while USB disconnected (reboot caused USB
disconnection is not one of these cases because snippet session
cannot survive reboot.
Use handle_reboot() instead).
Use this function to make sure the services started by Mobly are
properly reconnected afterwards.
Just like the usage of self.handle_reboot(), this method does not
automatically detect if the disconnection is because of a reboot or USB
disconnect. Users of this function should make sure the right handle_*
function is used to handle the correct type of disconnection.
This method also reconnects snippet event client. Therefore, the
callback objects created (by calling Async RPC methods) before
disconnection would still be valid and can be used to retrieve RPC
execution result after device got reconnected.
Example Usage:
.. code-block:: python
with ad.handle_usb_disconnect():
try:
# User action that triggers USB disconnect, could throw
# exceptions.
do_something()
finally:
# User action that triggers USB reconnect
action_that_reconnects_usb()
# Make sure device is reconnected before returning from this
# context
ad.adb.wait_for_device(timeout=SOME_TIMEOUT)
"""
live_service_names = self.services.list_live_services()
self.services.pause_all()
try:
yield
finally:
self.services.resume_services(live_service_names)
@property
def build_info(self):
"""Get the build info of this Android device, including build id and
build type.
This is not available if the device is in bootloader mode.
Returns:
A dict with the build info of this Android device, or None if the
device is in bootloader mode.
"""
if self.is_bootloader:
      self.log.error('Device is in fastboot mode, could not get build info.')
return
if self._build_info is None or self._is_rebooting:
info = {}
build_info = self.adb.getprops(CACHED_SYSTEM_PROPS)
info['build_id'] = build_info['ro.build.id']
info['build_type'] = build_info['ro.build.type']
info['build_version_codename'] = build_info.get(
'ro.build.version.codename', '')
info['build_version_sdk'] = build_info.get('ro.build.version.sdk', '')
info['build_product'] = build_info.get('ro.build.product', '')
info['build_characteristics'] = build_info.get('ro.build.characteristics',
'')
info['debuggable'] = build_info.get('ro.debuggable', '')
info['product_name'] = build_info.get('ro.product.name', '')
info['hardware'] = build_info.get('ro.hardware', '')
self._build_info = info
return info
return self._build_info
@property
def is_bootloader(self):
"""True if the device is in bootloader mode.
"""
return self.serial in list_fastboot_devices()
@property
def is_adb_root(self):
"""True if adb is running as root for this device.
"""
try:
return '0' == self.adb.shell('id -u').decode('utf-8').strip()
except adb.AdbError:
# Wait a bit and retry to work around adb flakiness for this cmd.
time.sleep(0.2)
return '0' == self.adb.shell('id -u').decode('utf-8').strip()
@property
def is_rootable(self):
return not self.is_bootloader and self.build_info['debuggable'] == '1'
@property
def model(self):
"""The Android code name for the device.
"""
# If device is in bootloader mode, get mode name from fastboot.
if self.is_bootloader:
out = self.fastboot.getvar('product').strip()
# 'out' is never empty because of the 'total time' message fastboot
# writes to stderr.
lines = out.decode('utf-8').split('\n', 1)
if lines:
tokens = lines[0].split(' ')
if len(tokens) > 1:
return tokens[1].lower()
return None
model = self.build_info['build_product'].lower()
if model == 'sprout':
return model
return self.build_info['product_name'].lower()
@property
def is_emulator(self):
"""Whether this device is probably an emulator.
Returns:
True if this is probably an emulator.
"""
if EMULATOR_SERIAL_REGEX.match(self.serial):
# If the device's serial follows 'emulator-dddd', then it's almost
# certainly an emulator.
return True
elif self.build_info['build_characteristics'] == 'emulator':
# If the device says that it's an emulator, then it's probably an
# emulator although some real devices apparently report themselves
# as emulators in addition to other things, so only return True on
# an exact match.
return True
elif self.build_info['hardware'] in ['ranchu', 'goldfish']:
# Ranchu and Goldfish are the hardware properties that the AOSP
# emulators report, so if the device says it's an AOSP emulator, it
# probably is one.
return True
else:
return False
def load_config(self, config):
"""Add attributes to the AndroidDevice object based on config.
Args:
config: A dictionary representing the configs.
Raises:
Error: The config is trying to overwrite an existing attribute.
"""
for k, v in config.items():
if hasattr(self, k) and k not in _ANDROID_DEVICE_SETTABLE_PROPS:
raise DeviceError(
self, ('Attribute %s already exists with value %s, cannot set '
'again.') % (k, getattr(self, k)))
setattr(self, k, v)
def root_adb(self):
"""Change adb to root mode for this device if allowed.
If executed on a production build, adb will not be switched to root
mode per security restrictions.
"""
self.adb.root()
# `root` causes the device to temporarily disappear from adb.
# So we need to wait for the device to come back before proceeding.
self.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND)
def load_snippet(self, name, package):
"""Starts the snippet apk with the given package name and connects.
Examples:
.. code-block:: python
ad.load_snippet(
name='maps', package='com.google.maps.snippets')
ad.maps.activateZoom('3')
Args:
name: string, the attribute name to which to attach the snippet
client. E.g. `name='maps'` attaches the snippet client to
`ad.maps`.
package: string, the package name of the snippet apk to connect to.
Raises:
SnippetError: Illegal load operations are attempted.
"""
# Should not load snippet with an existing attribute.
if hasattr(self, name):
raise SnippetError(
self,
'Attribute "%s" already exists, please use a different name.' % name)
self.services.snippets.add_snippet_client(name, package)
def unload_snippet(self, name):
"""Stops a snippet apk.
Args:
name: The attribute name the snippet server is attached with.
Raises:
SnippetError: The given snippet name is not registered.
"""
self.services.snippets.remove_snippet_client(name)
def generate_filename(self,
file_type,
time_identifier=None,
extension_name=None):
"""Generates a name for an output file related to this device.
The name follows the pattern:
{file type},{debug_tag},{serial},{model},{time identifier}.{ext}
"debug_tag" is only added if it's different from the serial. "ext" is
added if specified by user.
Args:
file_type: string, type of this file, like "logcat" etc.
time_identifier: string or RuntimeTestInfo. If a `RuntimeTestInfo`
is passed in, the `signature` of the test case will be used. If
a string is passed in, the string itself will be used.
Otherwise the current timestamp will be used.
extension_name: string, the extension name of the file.
Returns:
String, the filename generated.
"""
time_str = time_identifier
if time_identifier is None:
time_str = mobly_logger.get_log_file_timestamp()
elif isinstance(time_identifier, runtime_test_info.RuntimeTestInfo):
time_str = time_identifier.signature
filename_tokens = [file_type]
if self.debug_tag != self.serial:
filename_tokens.append(self.debug_tag)
filename_tokens.extend([self.serial, self.model, time_str])
filename_str = ','.join(filename_tokens)
if extension_name is not None:
filename_str = '%s.%s' % (filename_str, extension_name)
filename_str = mobly_logger.sanitize_filename(filename_str)
self.log.debug('Generated filename: %s', filename_str)
return filename_str
def take_bug_report(self,
test_name=None,
begin_time=None,
timeout=300,
destination=None):
"""Takes a bug report on the device and stores it in a file.
Args:
test_name: Name of the test method that triggered this bug report.
begin_time: Timestamp of when the test started. If not set, then
this will default to the current time.
timeout: float, the number of seconds to wait for bugreport to
complete, default is 5min.
destination: string, path to the directory where the bugreport
should be saved.
Returns:
A string that is the absolute path to the bug report on the host.
"""
prefix = DEFAULT_BUG_REPORT_NAME
if test_name:
prefix = '%s,%s' % (DEFAULT_BUG_REPORT_NAME, test_name)
if begin_time is None:
begin_time = mobly_logger.get_log_file_timestamp()
new_br = True
try:
stdout = self.adb.shell('bugreportz -v').decode('utf-8')
# This check is necessary for builds before N, where adb shell's ret
# code and stderr are not propagated properly.
if 'not found' in stdout:
new_br = False
except adb.AdbError:
new_br = False
if destination is None:
destination = os.path.join(self.log_path, 'BugReports')
br_path = utils.abs_path(destination)
utils.create_dir(br_path)
filename = self.generate_filename(prefix, str(begin_time), 'txt')
if new_br:
filename = filename.replace('.txt', '.zip')
full_out_path = os.path.join(br_path, filename)
# in case device restarted, wait for adb interface to return
self.wait_for_boot_completion()
self.log.debug('Start taking bugreport.')
if new_br:
out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')
if not out.startswith('OK'):
raise DeviceError(self, 'Failed to take bugreport: %s' % out)
br_out_path = out.split(':')[1].strip()
self.adb.pull([br_out_path, full_out_path])
self.adb.shell(['rm', br_out_path])
else:
# shell=True as this command redirects the stdout to a local file
# using shell redirection.
self.adb.bugreport(' > "%s"' % full_out_path, shell=True, timeout=timeout)
self.log.debug('Bugreport taken at %s.', full_out_path)
return full_out_path
def take_screenshot(self, destination, prefix='screenshot'):
"""Takes a screenshot of the device.
Args:
destination: string, full path to the directory to save in.
prefix: string, prefix file name of the screenshot.
Returns:
string, full path to the screenshot file on the host.
"""
filename = self.generate_filename(prefix, extension_name='png')
device_path = os.path.join('/storage/emulated/0/', filename)
self.adb.shell(['screencap', '-p', device_path],
timeout=TAKE_SCREENSHOT_TIMEOUT_SECOND)
utils.create_dir(destination)
self.adb.pull([device_path, destination])
pic_path = os.path.join(destination, filename)
self.log.debug('Screenshot taken, saved on the host: %s', pic_path)
self.adb.shell(['rm', device_path])
return pic_path
def run_iperf_client(self, server_host, extra_args=''):
"""Start iperf client on the device.
    Returns status as True if the iperf client starts successfully,
    and the data flow information as results.
Args:
server_host: Address of the iperf server.
extra_args: A string representing extra arguments for iperf client,
e.g. '-i 1 -t 30'.
Returns:
      status: True if the iperf client started successfully.
      results: a list of strings containing the data flow information.
"""
out = self.adb.shell('iperf3 -c %s %s' % (server_host, extra_args))
clean_out = str(out, 'utf-8').strip().split('\n')
if 'error' in clean_out[0].lower():
return False, clean_out
return True, clean_out
def wait_for_boot_completion(self,
timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND):
"""Waits for Android framework to broadcast ACTION_BOOT_COMPLETED.
This function times out after 15 minutes.
Args:
timeout: float, the number of seconds to wait before timing out.
        If not specified, this defaults to 15 minutes.
"""
deadline = time.perf_counter() + timeout
self.adb.wait_for_device(timeout=timeout)
while time.perf_counter() < deadline:
try:
if self.is_boot_completed():
return
except (adb.AdbError, adb.AdbTimeoutError):
# adb shell calls may fail during certain period of booting
# process, which is normal. Ignoring these errors.
pass
time.sleep(5)
raise DeviceError(self, 'Booting process timed out')
def is_boot_completed(self):
"""Checks if device boot is completed by verifying system property."""
completed = self.adb.getprop('sys.boot_completed')
if completed == '1':
self.log.debug('Device boot completed.')
return True
return False
def is_adb_detectable(self):
"""Checks if USB is on and device is ready by verifying adb devices."""
serials = list_adb_devices()
if self.serial in serials:
self.log.debug('Is now adb detectable.')
return True
return False
def reboot(self):
"""Reboots the device.
Generally one should use this method to reboot the device instead of
    directly calling `adb.reboot`, because this method gracefully handles
the teardown and restoration of running services.
This method is blocking and only returns when the reboot has completed
and the services restored.
Raises:
Error: Waiting for completion timed out.
"""
if self.is_bootloader:
self.fastboot.reboot()
return
with self.handle_reboot():
self.adb.reboot()
def __getattr__(self, name):
"""Tries to return a snippet client registered with `name`.
This is for backward compatibility of direct accessing snippet clients.
"""
client = self.services.snippets.get_snippet_client(name)
if client:
return client
return self.__getattribute__(name)
# Properties in AndroidDevice that have setters.
# This line has to live below the AndroidDevice code.
_ANDROID_DEVICE_SETTABLE_PROPS = utils.get_settable_properties(AndroidDevice)
class AndroidDeviceLoggerAdapter(logging.LoggerAdapter):
"""A wrapper class that adds a prefix to each log line.
Usage:
.. code-block:: python
my_log = AndroidDeviceLoggerAdapter(logging.getLogger(), {
'tag': <custom tag>
})
Then each log line added by my_log will have a prefix
'[AndroidDevice|<tag>]'
"""
def process(self, msg, kwargs):
msg = _DEBUG_PREFIX_TEMPLATE % (self.extra['tag'], msg)
return (msg, kwargs)
|
apache-2.0
| 7,065,136,944,551,025,000 | -1,235,029,343,678,719,500 | 32.769834 | 84 | 0.677286 | false |
benjaminbrinkman/open-ad-platform
|
.venv/lib/python3.4/site-packages/_markerlib/markers.py
|
1769
|
3979
|
# -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
"""Return copy of default PEP 385 globals dictionary."""
return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
def __init__(self, statement):
self.statement = statement # for error messages
ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
# Bool operations
ALLOWED += (ast.And, ast.Or)
# Comparison operations
ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
def visit(self, node):
"""Ensure statement only contains allowed nodes."""
if not isinstance(node, self.ALLOWED):
raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
(self.statement,
(' ' * node.col_offset) + '^'))
return ast.NodeTransformer.visit(self, node)
def visit_Attribute(self, node):
"""Flatten one level of attribute access."""
new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
return ast.copy_location(new_node, node)
def parse_marker(marker):
tree = ast.parse(marker, mode='eval')
new_tree = ASTWhitelist(marker).generic_visit(tree)
return new_tree
def compile_marker(parsed_marker):
return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker]
def interpret(marker, environment=None):
return compile(marker)(environment)
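# Hedged usage sketch, not part of the original module: evaluating a PEP 345
# environment marker against the default environment. The marker string is
# illustrative only.
_EXAMPLE_MARKER = "python_version >= '2.6' and os.name == 'posix'"
# interpret(_EXAMPLE_MARKER) returns a bool; compile(_EXAMPLE_MARKER) returns
# a reusable predicate that accepts an optional environment dict.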
|
mit
| -1,996,727,246,950,957,000 | 5,918,407,029,790,745,000 | 32.436975 | 106 | 0.621262 | false |
neilhan/tensorflow
|
tensorflow/python/ops/histogram_ops.py
|
12
|
3799
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Histograms
@@histogram_fixed_width
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
def histogram_fixed_width(values,
value_range,
nbins=100,
dtype=dtypes.int32,
name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor`. new_values <= value_range[0] will be
mapped to hist[0], values >= value_range[1] will be mapped to hist[-1].
Must be same dtype as new_values.
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram_fixed_width').
Returns:
A 1-D `Tensor` holding histogram of values.
Examples:
```python
# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
nbins = 5
value_range = [0.0, 5.0]
new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
  with tf.Session() as sess:
    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    tf.initialize_all_variables().run()
sess.run(hist) => [2, 1, 1, 0, 2]
```
"""
with ops.name_scope(name, 'histogram_fixed_width',
[values, value_range, nbins]) as scope:
values = ops.convert_to_tensor(values, name='values')
values = array_ops.reshape(values, [-1])
value_range = ops.convert_to_tensor(value_range, name='value_range')
nbins = ops.convert_to_tensor(nbins, dtype=dtypes.int32, name='nbins')
nbins_float = math_ops.to_float(nbins)
# Map tensor values that fall within value_range to [0, 1].
scaled_values = math_ops.truediv(values - value_range[0],
value_range[1] - value_range[0],
name='scaled_values')
# map tensor values within the open interval value_range to {0,.., nbins-1},
# values outside the open interval will be zero or less, or nbins or more.
indices = math_ops.floor(nbins_float * scaled_values, name='indices')
# Clip edge cases (e.g. value = value_range[1]) or "outliers."
indices = math_ops.cast(
clip_ops.clip_by_value(indices, 0, nbins_float - 1), dtypes.int32)
# TODO(langmore) This creates an array of ones to add up and place in the
# bins. This is inefficient, so replace when a better Op is available.
return math_ops.unsorted_segment_sum(
array_ops.ones_like(indices, dtype=dtype),
indices,
nbins,
name=scope)
|
apache-2.0
| 8,591,057,422,273,669,000 | -2,774,180,243,736,061,400 | 38.572917 | 80 | 0.644643 | false |
ccpgames/eve-metrics
|
web2py/applications/admin/languages/ru-ru.py
|
1
|
23612
|
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Update" ist ein optionaler Ausdruck wie "Feld1 = \'newvalue". JOIN Ergebnisse können nicht aktualisiert oder gelöscht werden',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s строк удалено',
'%s rows updated': '%s строк обновлено',
'(requires internet access)': '(требует подключения к интернету)',
'(something like "it-it")': '(наподобие "it-it")',
'A new version of web2py is available': 'Доступна новая версия web2py',
'A new version of web2py is available: %s': 'Доступна новая версия web2py: %s',
'A new version of web2py is available: Version 1.85.3 (2010-09-18 07:07:46)\n': 'Доступна новая версия web2py: Версия 1.85.3 (2010-09-18 07:07:46)\n',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ВНИМАНИЕ: Для входа требуется безопасное (HTTPS) соединение либо запуск на localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ВНИМАНИЕ: Тестирование не потокобезопасно, поэтому не запускайте несколько тестов параллельно.',
'ATTENTION: This is an experimental feature and it needs more testing.': 'ВНИМАНИЕ: Это экспериментальная возможность и требует тестирования.',
'ATTENTION: you cannot edit the running application!': 'ВНИМАНИЕ: Вы не можете редактировать работающее приложение!',
'Abort': 'Отмена',
'About': 'О',
'About application': 'О приложении',
'Additional code for your application': 'Допольнительный код для вашего приложения',
'Admin is disabled because insecure channel': 'Админпанель выключена из-за небезопасного соединения',
'Admin is disabled because unsecure channel': 'Админпанель выключена из-за небезопасного соединения',
'Admin language': 'Язык админпанели',
'Administrator Password:': 'Пароль администратора:',
'Application name:': 'Название приложения:',
'Are you sure you want to delete file "%s"?': 'Вы действительно хотите удалить файл "%s"?',
'Are you sure you want to uninstall application "%s"': 'Вы действительно хотите удалить приложение "%s"',
'Are you sure you want to uninstall application "%s"?': 'Вы действительно хотите удалить приложение "%s"?',
'Are you sure you want to upgrade web2py now?': 'Вы действительно хотите обновить web2py сейчас?',
'Authentication': 'Аутентификация',
'Available databases and tables': 'Доступные базы данных и таблицы',
'Cannot be empty': 'Не может быть пустым',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Невозможно компилировать: в приложении присутствуют ошибки. Отладьте его, исправьте ошибки и попробуйте заново.',
'Change Password': 'Изменить пароль',
'Check to delete': 'Поставьте для удаления',
'Checking for upgrades...': 'Проверка обновлений...',
'Client IP': 'IP клиента',
'Controller': 'Контроллер',
'Controllers': 'Контроллеры',
'Copyright': 'Copyright',
'Create new simple application': 'Создать новое простое приложение',
'Current request': 'Текущий запрос',
'Current response': 'Текущий ответ',
'Current session': 'Текущая сессия',
'DB Model': 'Модель БД',
'DESIGN': 'ДИЗАЙН',
'Database': 'База данных',
'Date and Time': 'Дата и время',
'Delete': 'Удалить',
'Delete:': 'Удалить:',
'Deploy on Google App Engine': 'Развернуть на Google App Engine',
'Description': 'Описание',
'Design for': 'Дизайн для',
'E-mail': 'E-mail',
'EDIT': 'ПРАВКА',
'Edit': 'Правка',
'Edit Profile': 'Правка профиля',
'Edit This App': 'Правка данного приложения',
'Edit application': 'Правка приложения',
'Edit current record': 'Правка текущей записи',
'Editing Language file': 'Правка языкового файла',
'Editing file': 'Правка файла',
'Editing file "%s"': 'Правка файла "%s"',
'Enterprise Web Framework': 'Enterprise Web Framework',
'Error logs for "%(app)s"': 'Журнал ошибок для "%(app)"',
'Exception instance attributes': 'Атрибуты объекта исключения',
'Expand Abbreviation': 'Раскрыть аббревиатуру',
'First name': 'Имя',
'Functions with no doctests will result in [passed] tests.': 'Функции без doctest будут давать [прошел] в тестах.',
'Go to Matching Pair': 'К подходящей паре',
'Group ID': 'ID группы',
'Hello World': 'Привет, Мир',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'Если отчет выше содержит номер ошибки, это указывает на ошибку при работе контроллера, до попытки выполнить doctest. Причиной чаще является неверные отступы или ошибки в коде вне функции. \nЗеленый заголовок указывает на успешное выполнение всех тестов. В этом случае результаты тестов не показываются.',
'If you answer "yes", be patient, it may take a while to download': 'Если вы ответили "Да", потерпите, загрузка может потребовать времени',
'If you answer yes, be patient, it may take a while to download': 'Если вы ответили "Да", потерпите, загрузка может потребовать времени',
'Import/Export': 'Импорт/Экспорт',
'Index': 'Индекс',
'Installed applications': 'Установленные приложения',
'Internal State': 'Внутренний статус',
'Invalid Query': 'Неверный запрос',
'Invalid action': 'Неверное действие',
'Invalid email': 'Неверный email',
'Key bindings': 'Связывание клавиш',
'Key bindings for ZenConding Plugin': 'Связывание клавиш для плагина ZenConding',
'Language files (static strings) updated': 'Языковые файлы (статичные строки) обновлены',
'Languages': 'Языки',
'Last name': 'Фамилия',
'Last saved on:': 'Последнее сохранение:',
'Layout': 'Верстка',
'License for': 'Лицензия для',
'Login': 'Логин',
'Login to the Administrative Interface': 'Вход в интерфейс администратора',
'Logout': 'Выход',
'Lost Password': 'Забыли пароль',
'Main Menu': 'Главное меню',
'Match Pair': 'Найти пару',
'Menu Model': 'Модель меню',
'Merge Lines': 'Объединить линии',
'Models': 'Модели',
'Modules': 'Модули',
'NO': 'НЕТ',
'Name': 'Название',
'New Record': 'Новая запись',
'New application wizard': 'Мастер нового приложения',
'New simple application': 'Новое простое приложение',
'Next Edit Point': 'Следующее место правки',
'No databases in this application': 'В приложении нет базы данных',
'Origin': 'Оригинал',
'Original/Translation': 'Оригинал/Перевод',
'Password': 'Пароль',
'Peeking at file': 'Просмотр',
'Plugin "%s" in application': 'Плагин "%s" в приложении',
'Plugins': 'Плагины',
'Powered by': 'Обеспечен',
'Previous Edit Point': 'Предыдущее место правки',
'Query:': 'Запрос:',
'Record ID': 'ID записи',
'Register': 'Зарегистрироваться',
'Registration key': 'Ключ регистрации',
'Reset Password key': 'Сброс пароля',
'Resolve Conflict file': 'Решить конфликт в файле',
'Role': 'Роль',
'Rows in table': 'Строк в таблице',
'Rows selected': 'Выбрано строк',
'Save via Ajax': 'Сохранить через Ajax',
'Saved file hash:': 'Хэш сохраненного файла:',
'Searching:': 'Поиск:',
'Static files': 'Статические файлы',
'Stylesheet': 'Таблицы стилей',
'Sure you want to delete this object?': 'Действительно хотите удалить данный объект?',
'TM': 'TM',
'Table name': 'Название таблицы',
'Testing application': 'Тест приложения',
'Testing controller': 'Тест контроллера',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query" является условием вида "db.table1.field1 == \'значение\'". Что-либо типа "db.table1.field1 db.table2.field2 ==" ведет к SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'Логика приложения, каждый URL отображается к одной функции в контроллере',
'The data representation, define database tables and sets': 'Представление данных, определите таблицы базы данных и наборы',
'The output of the file is a dictionary that was rendered by the view': 'Выводом файла является словарь, который создан в виде',
'The presentations layer, views are also known as templates': 'Слой презентации, виды так же известны, как шаблоны',
'There are no controllers': 'Отсутствуют контроллеры',
'There are no models': 'Отсутствуют модели',
'There are no modules': 'Отсутствуют модули',
'There are no plugins': 'Отсутствуют плагины',
'There are no static files': 'Отсутствуют статичные файлы',
'There are no translators, only default language is supported': 'Отсутствуют переводчики, поддерживается только стандартный язык',
'There are no views': 'Отсутствуют виды',
'These files are served without processing, your images go here': 'Эти файлы обслуживаются без обработки, ваши изображения попадут сюда',
'This is a copy of the scaffolding application': 'Это копия сгенерированного приложения',
'This is the %(filename)s template': 'Это шаблон %(filename)s',
'Ticket': 'Тикет',
'Timestamp': 'Время',
'To create a plugin, name a file/folder plugin_[name]': 'Для создания плагина назовите файл/папку plugin_[название]',
'Translation strings for the application': 'Строки перевода для приложения',
'Unable to check for upgrades': 'Невозможно проверить обновления',
'Unable to download': 'Невозможно загрузить',
'Unable to download app': 'Невозможно загрузить приложение',
'Update:': 'Обновить:',
'Upload & install packed application': 'Загрузить и установить приложение в архиве',
'Upload a package:': 'Загрузить пакет:',
'Upload existing application': 'Загрузить существующее приложение',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Используйте (...)&(...) для AND, (...)|(...) для OR, и ~(...) для NOT при создании сложных запросов.',
'Use an url:': 'Используйте url:',
'User ID': 'ID пользователя',
'Version': 'Версия',
'View': 'Вид',
'Views': 'Виды',
'Welcome %s': 'Добро пожаловать, %s',
'Welcome to web2py': 'Добро пожаловать в web2py',
'Which called the function': 'Который вызвал функцию',
'Wrap with Abbreviation': 'Заключить в аббревиатуру',
'YES': 'ДА',
'You are successfully running web2py': 'Вы успешно запустили web2py',
'You can modify this application and adapt it to your needs': 'Вы можете изменить это приложение и подогнать под свои нужды',
'You visited the url': 'Вы посетили URL',
'About': 'О',
'additional code for your application': 'добавочный код для вашего приложения',
'admin disabled because no admin password': 'админка отключена, потому что отсутствует пароль администратора',
'admin disabled because not supported on google apps engine': 'админка отключена, т.к. не поддерживается на google app engine',
'admin disabled because unable to access password file': 'админка отключена, т.к. невозможно получить доступ к файлу с паролями ',
'administrative interface': 'интерфейс администратора',
'and rename it (required):': 'и переименуйте его (необходимо):',
'and rename it:': ' и переименуйте его:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'Appadmin отключен, т.к. соединение не безопасно',
'application "%s" uninstalled': 'Приложение "%s" удалено',
'application compiled': 'Приложение скомпилировано',
'application is compiled and cannot be designed': 'Приложение скомпилировано и дизайн не может быть изменен',
'arguments': 'аргументы',
'back': 'назад',
'beautify': 'Раскрасить',
'cache': 'кэш',
'cache, errors and sessions cleaned': 'Кэш, ошибки и сессии очищены',
'call': 'вызов',
'cannot create file': 'Невозможно создать файл',
'cannot upload file "%(filename)s"': 'Невозможно загрузить файл "%(filename)s"',
'Change admin password': 'Изменить пароль администратора',
'change password': 'изменить пароль',
'check all': 'проверить все',
'Check for upgrades': 'проверить обновления',
'Clean': 'Очистить',
'click here for online examples': 'нажмите здесь для онлайн примеров',
'click here for the administrative interface': 'нажмите здесь для интерфейса администратора',
'click to check for upgrades': 'нажмите для проверки обновления',
'code': 'код',
'collapse/expand all': 'свернуть/развернуть все',
'Compile': 'Компилировать',
'compiled application removed': 'скомпилированное приложение удалено',
'controllers': 'контроллеры',
'Create': 'Создать',
'create file with filename:': 'Создать файл с названием:',
'create new application:': 'создать новое приложение:',
'created by': 'создано',
'crontab': 'crontab',
'currently running': 'сейчас работает',
'currently saved or': 'сейчас сохранено или',
'customize me!': 'настрой меня!',
'data uploaded': 'данные загружены',
'database': 'база данных',
'database %s select': 'Выбор базы данных %s ',
'database administration': 'администрирование базы данных',
'db': 'бд',
'defines tables': 'определить таблицы',
'delete': 'удалить',
'delete all checked': 'удалить выбранные',
'delete plugin': 'удалить плагин',
'Deploy': 'Развернуть',
'design': 'дизайн',
'direction: ltr': 'направление: ltr',
'documentation': 'документация',
'done!': 'выполнено!',
'download layouts': 'загрузить шаблоны',
'download plugins': 'загрузить плагины',
'Edit': 'Правка',
'edit controller': 'правка контроллера',
'edit profile': 'правка профиля',
'edit views:': 'правка видов:',
'Errors': 'Ошибки',
'escape': 'escape',
'export as csv file': 'Экспорт в CSV',
'exposes': 'открывает',
'extends': 'расширяет',
'failed to reload module': 'невозможно загрузить модуль',
'file "%(filename)s" created': 'файл "%(filename)s" создан',
'file "%(filename)s" deleted': 'файл "%(filename)s" удален',
'file "%(filename)s" uploaded': 'файл "%(filename)s" загружен',
'file "%(filename)s" was not deleted': 'файл "%(filename)s" не был удален',
'file "%s" of %s restored': 'файл "%s" из %s восстановлен',
'file changed on disk': 'файл изменился на диске',
'file does not exist': 'файл не существует',
'file saved on %(time)s': 'файл сохранен %(time)s',
'file saved on %s': 'файл сохранен %s',
'files': 'файлы',
'filter': 'фильтр',
'Help': 'Помощь',
'htmledit': 'htmledit',
'includes': 'включает',
'index': 'index',
'insert new': 'вставить новый',
'insert new %s': 'вставить новый %s',
'Install': 'Установить',
'internal error': 'внутренняя ошибка',
'invalid password': 'неверный пароль',
'invalid request': 'неверный запрос',
'invalid ticket': 'неверный тикет',
'language file "%(filename)s" created/updated': 'Языковой файл "%(filename)s" создан/обновлен',
'languages': 'языки',
'languages updated': 'языки обновлены',
'loading...': 'загрузка...',
'located in the file': 'расположенный в файле',
'login': 'логин',
'Logout': 'выход',
'lost password?': 'Пароль утерян?',
'merge': 'объединить',
'models': 'модели',
'modules': 'модули',
'new application "%s" created': 'новое приложение "%s" создано',
'new record inserted': 'новая запись вставлена',
'next 100 rows': 'следующие 100 строк',
'or import from csv file': 'или импорт из csv файла',
'or provide app url:': 'или URL приложения:',
'or provide application url:': 'или URL приложения:',
'Overwrite installed app': 'Переписать на установленное приложение',
'Pack all': 'упаковать все',
'Pack compiled': 'Упаковать скомпилированное',
'pack plugin': 'Упаковать плагин',
'please wait!': 'подождите, пожалуйста!',
'plugins': 'плагины',
'previous 100 rows': 'предыдущие 100 строк',
'record': 'запись',
'record does not exist': 'запись не существует',
'record id': 'id записи',
'register': 'зарегистрироваться',
'Remove compiled': 'Удалить скомпилированное',
'restore': 'восстановить',
'revert': 'откатиться',
'save': 'сохранить',
'selected': 'выбрано',
'session expired': 'сессия истекла',
'shell': 'shell',
'Site': 'сайт',
'some files could not be removed': 'некоторые файлы нельзя удалить',
'Start wizard': 'запустить мастер',
'state': 'статус',
'static': 'статичные файлы',
'submit': 'Отправить',
'table': 'таблица',
'test': 'тест',
'test_def': 'test_def',
'test_for': 'test_for',
'test_if': 'test_if',
'test_try': 'test_try',
'the application logic, each URL path is mapped in one exposed function in the controller': 'Логика приложения, каждый URL отображается в открытую функцию в контроллере',
'the data representation, define database tables and sets': 'представление данных, определить таблицы и наборы',
'the presentations layer, views are also known as templates': 'слой представления, виды известные так же как шаблоны',
'these files are served without processing, your images go here': 'Эти файлы обслуживаются без обработки, ваши изображения попадут сюда',
'to previous version.': 'на предыдущую версию.',
'translation strings for the application': 'строки перевода для приложения',
'try': 'try',
'try something like': 'попробовать что-либо вида',
'unable to create application "%s"': 'невозможно создать приложение "%s"',
'unable to delete file "%(filename)s"': 'невозможно удалить файл "%(filename)s"',
'unable to parse csv file': 'невозможно разобрать файл csv',
'unable to uninstall "%s"': 'невозможно удалить "%s"',
'uncheck all': 'снять выбор всего',
'Uninstall': 'Удалить',
'update': 'обновить',
'update all languages': 'обновить все языки',
'upgrade web2py now': 'обновить web2py сейчас',
'upload': 'загрузить',
'upload application:': 'загрузить приложение:',
'upload file:': 'загрузить файл:',
'upload plugin file:': 'загрузить файл плагина:',
'user': 'пользователь',
'variables': 'переменные',
'versioning': 'версии',
'view': 'вид',
'views': 'виды',
'web2py Recent Tweets': 'последние твиты по web2py',
'web2py is up to date': 'web2py обновлен',
'xml': 'xml',
}
|
mit
| -1,497,447,793,822,088,700 | 1,285,878,316,911,505,400 | 49.516035 | 630 | 0.711998 | false |
egabancho/invenio
|
invenio/modules/oauth2server/provider.py
|
1
|
3287
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Configuration of flask-oauthlib provider."""
from datetime import datetime, timedelta
from flask import current_app
from flask.ext.login import current_user
from flask_oauthlib.provider import OAuth2Provider
from invenio.ext.sqlalchemy import db
from invenio.modules.accounts.models import User
from .models import Token, Client
oauth2 = OAuth2Provider()
@oauth2.usergetter
def get_user(username, password, *args, **kwargs):
"""Get user for grant type password.
Needed for grant type 'password'. Note, grant type password is by default
disabled.
"""
user = User.query.filter_by(username=username).first()
    if user and user.check_password(password):
return user
@oauth2.tokengetter
def get_token(access_token=None, refresh_token=None):
"""Load an access token.
Add support for personal access tokens compared to flask-oauthlib
"""
if access_token:
t = Token.query.filter_by(access_token=access_token).first()
if t and t.is_personal:
t.expires = datetime.utcnow() + timedelta(
seconds=int(current_app.config.get(
'OAUTH2_PROVIDER_TOKEN_EXPIRES_IN'
))
)
return t
elif refresh_token:
return Token.query.join(Token.client).filter(
Token.refresh_token == refresh_token,
Token.is_personal == False,
Client.is_confidential == True,
).first()
else:
return None
@oauth2.tokensetter
def save_token(token, request, *args, **kwargs):
"""Token persistence."""
    # Exclude the personal access tokens, which don't expire.
uid = request.user.id if request.user else current_user.get_id()
tokens = Token.query.filter_by(
client_id=request.client.client_id,
user_id=uid,
is_personal=False,
)
# make sure that every client has only one token connected to a user
if tokens:
for tk in tokens:
db.session.delete(tk)
db.session.commit()
expires_in = token.get('expires_in')
expires = datetime.utcnow() + timedelta(seconds=int(expires_in))
tok = Token(
access_token=token['access_token'],
refresh_token=token.get('refresh_token'),
token_type=token['token_type'],
_scopes=token['scope'],
expires=expires,
client_id=request.client.client_id,
user_id=uid,
is_personal=False,
)
db.session.add(tok)
db.session.commit()
return tok
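# Illustrative only: a minimal sketch (not part of Invenio) of how this
# provider is typically wired into a Flask application so the usergetter,
# tokengetter and tokensetter hooks above are used; the factory name and the
# expiry value are assumptions.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.config['OAUTH2_PROVIDER_TOKEN_EXPIRES_IN'] = 3600  # read by get_token()
    oauth2.init_app(app)  # registers the decorated hooks defined in this module
    return app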
|
gpl-2.0
| -7,206,146,962,221,628,000 | -7,085,489,783,892,097,000 | 30.009434 | 77 | 0.662915 | false |
dsquareindia/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
70
|
7808
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
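# Illustrative only: demonstrates the in-place contract of center_and_norm on a
# (n_sources, n_samples) array; this helper is not part of the original suite.
def _example_center_and_norm():
    rng = np.random.RandomState(0)
    x = rng.randn(2, 100)
    center_and_norm(x)  # modifies x in place; each row gets zero mean, unit std
    assert np.allclose(x.mean(axis=1), 0.0)
    assert np.allclose(x.std(axis=1), 1.0)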
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
| -6,624,732,717,426,415,000 | -4,870,322,322,827,851,000 | 32.225532 | 79 | 0.58978 | false |
msebire/intellij-community
|
python/helpers/pydev/pydevconsole.py
|
1
|
12520
|
'''
Entry point module to start the interactive console.
'''
from _pydev_bundle._pydev_getopt import gnu_getopt
from _pydev_comm.rpc import make_rpc_client, start_rpc_server, start_rpc_server_and_make_client
from _pydev_imps._pydev_saved_modules import thread
start_new_thread = thread.start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole
import os
import sys
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE, dict_keys
from _pydevd_bundle.pydevd_utils import save_main_module
from _pydev_bundle import fix_getpass
fix_getpass.fix_getpass()
from _pydev_bundle.pydev_imports import _queue
try:
import __builtin__
except:
import builtins as __builtin__ # @UnresolvedImport
from _pydev_bundle.pydev_stdin import BaseStdIn
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface
from _pydev_bundle.pydev_console_types import Command
IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3
IS_PY24 = sys.version_info[0] == 2 and sys.version_info[1] == 4
try:
try:
execfile #Not in Py3k
except NameError:
from _pydev_bundle.pydev_imports import execfile
__builtin__.execfile = execfile
except:
pass
# Pull in runfile, the interface to UMD that wraps execfile
from _pydev_bundle.pydev_umd import runfile, _set_globals_function
if sys.version_info[0] >= 3:
import builtins # @UnresolvedImport
builtins.runfile = runfile
else:
import __builtin__
__builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, mainThread, connect_status_queue=None, rpc_client=None):
BaseInterpreterInterface.__init__(self, mainThread, connect_status_queue, rpc_client)
self.namespace = {}
self.save_main()
self.interpreter = InteractiveConsole(self.namespace)
self._input_error_printed = False
def save_main(self):
m = save_main_module('<input>', 'pydevconsole')
self.namespace = m.__dict__
try:
self.namespace['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
def do_add_exec(self, codeFragment):
command = Command(self.interpreter, codeFragment)
command.run()
return command.more
def get_namespace(self):
return self.namespace
def close(self):
sys.exit(0)
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def activate_mpl_if_already_imported(interpreter):
if interpreter.mpl_modules_for_patching:
for module in dict_keys(interpreter.mpl_modules_for_patching):
if module in sys.modules:
activate_function = interpreter.mpl_modules_for_patching.pop(module)
activate_function()
def init_set_return_control_back(interpreter):
from pydev_ipython.inputhook import set_return_control_callback
def return_control():
''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
out if they should cede control and return '''
if _ProcessExecQueueHelper._debug_hook:
# Some of the input hooks check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
# XXX: Eventually the inputhook code will have diverged enough
# from the IPython source that it will be worthwhile rewriting
# it rather than pretending to maintain the old API
_ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
if _ProcessExecQueueHelper._return_control_osc:
return True
if not interpreter.exec_queue.empty():
return True
return False
set_return_control_callback(return_control)
def init_mpl_in_console(interpreter):
init_set_return_control_back(interpreter)
if not INTERACTIVE_MODE_AVAILABLE:
return
activate_mpl_if_already_imported(interpreter)
from _pydev_bundle.pydev_import_hook import import_hook_manager
for mod in dict_keys(interpreter.mpl_modules_for_patching):
import_hook_manager.add_module_name(mod, interpreter.mpl_modules_for_patching.pop(mod))
def process_exec_queue(interpreter):
init_mpl_in_console(interpreter)
from pydev_ipython.inputhook import get_inputhook
while 1:
# Running the request may have changed the inputhook in use
inputhook = get_inputhook()
if _ProcessExecQueueHelper._debug_hook:
_ProcessExecQueueHelper._debug_hook()
if inputhook:
try:
# Note: it'll block here until return_control returns True.
inputhook()
except:
import traceback;traceback.print_exc()
try:
try:
code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second
except _queue.Empty:
continue
if hasattr(code_fragment, '__call__'):
# It can be a callable (i.e.: something that must run in the main
# thread can be put in the queue for later execution).
code_fragment()
else:
more = interpreter.add_exec(code_fragment)
except KeyboardInterrupt:
interpreter.buffer = None
continue
except SystemExit:
raise
except:
            import traceback
            type, value, tb = sys.exc_info()
            traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
if 'IPYTHONENABLE' in os.environ:
IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
IPYTHON = True
try:
try:
exitfunc = sys.exitfunc
except AttributeError:
exitfunc = None
if IPYTHON:
from _pydev_bundle.pydev_ipython_console import InterpreterInterface
if exitfunc is not None:
sys.exitfunc = exitfunc
else:
try:
delattr(sys, 'exitfunc')
except:
pass
except:
IPYTHON = False
pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def do_exit(*args):
'''
We have to override the exit because calling sys.exit will only actually exit the main thread,
and as we're in a Xml-rpc server, that won't work.
'''
try:
import java.lang.System
java.lang.System.exit(1)
except ImportError:
if len(args) == 1:
os._exit(args[0])
else:
os._exit(0)
def enable_thrift_logging():
"""Sets up `thriftpy` logger
The logger is used in `thriftpy/server.py` for logging exceptions.
"""
import logging
# create logger
logger = logging.getLogger('_shaded_thriftpy')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def create_server_handler_factory(interpreter):
def server_handler_factory(rpc_client):
interpreter.rpc_client = rpc_client
return interpreter
return server_handler_factory
def start_server(port):
if port is None:
port = 0
# 0. General stuff
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
from pydev_console.protocol import PythonConsoleBackendService, PythonConsoleFrontendService
enable_thrift_logging()
server_service = PythonConsoleBackendService
client_service = PythonConsoleFrontendService
# 1. Start Python console server
# `InterpreterInterface` implements all methods required for `server_handler`
interpreter = InterpreterInterface(threading.currentThread())
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server_socket = start_rpc_server_and_make_client('', port, server_service, client_service, create_server_handler_factory(interpreter))
# 2. Print server port for the IDE
_, server_port = server_socket.getsockname()
print(server_port)
# 3. Wait for IDE to connect to the server
process_exec_queue(interpreter)
def start_client(host, port):
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
from pydev_console.protocol import PythonConsoleBackendService, PythonConsoleFrontendService
enable_thrift_logging()
client_service = PythonConsoleFrontendService
client, server_transport = make_rpc_client(client_service, host, port)
interpreter = InterpreterInterface(threading.currentThread(), rpc_client=client)
# we do not need to start the server in a new thread because it does not need to accept a client connection, it already has it
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server_service = PythonConsoleBackendService
# `InterpreterInterface` implements all methods required for the handler
server_handler = interpreter
start_rpc_server(server_transport, server_service, server_handler)
process_exec_queue(interpreter)
def get_interpreter():
try:
interpreterInterface = getattr(__builtin__, 'interpreter')
except AttributeError:
interpreterInterface = InterpreterInterface(None, None, threading.currentThread())
__builtin__.interpreter = interpreterInterface
print(interpreterInterface.get_greeting_msg())
return interpreterInterface
def get_completions(text, token, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
return interpreterInterface.getCompletions(text, token)
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
#Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
#so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
#representations of its classes).
#See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
#'Variables' and 'Expressions' views stopped working when debugging interactive console
import pydevconsole
sys.stdin = pydevconsole.BaseStdIn(sys.stdin)
# parse command-line arguments
optlist, _ = gnu_getopt(sys.argv, 'm:h:p', ['mode=', 'host=', 'port='])
mode = None
host = None
port = None
for opt, arg in optlist:
if opt in ('-m', '--mode'):
mode = arg
elif opt in ('-h', '--host'):
host = arg
elif opt in ('-p', '--port'):
port = int(arg)
if mode not in ('client', 'server'):
sys.exit(-1)
if mode == 'client':
if not port:
# port must be set for client
sys.exit(-1)
if not host:
from _pydev_bundle import pydev_localhost
host = client_host = pydev_localhost.get_localhost()
pydevconsole.start_client(host, port)
elif mode == 'server':
pydevconsole.start_server(port)
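# Illustrative invocations (assumed, based on the option parsing above):
#   python pydevconsole.py --mode=server --port=0
#     starts the console backend on an OS-assigned port and prints it so the
#     IDE can connect to it;
#   python pydevconsole.py --mode=client --host=127.0.0.1 --port=<IDE port>
#     connects back to an IDE frontend already listening on the given port.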
|
apache-2.0
| 9,026,109,387,334,981,000 | 5,181,328,576,930,242,000 | 31.268041 | 138 | 0.626038 | false |
MarauderXtreme/sipa
|
sipa/model/finance.py
|
1
|
2369
|
from abc import ABCMeta, abstractmethod
from flask_babel import gettext
from sipa.model.fancy_property import ActiveProperty, Capabilities
from sipa.units import format_money
from sipa.utils import compare_all_attributes
class BaseFinanceInformation(metaclass=ABCMeta):
"""A Class providing finance information about a user.
This class bundles informations such as whether the current user
is obliged to pay, the current balance, history, and the time of
the last update. The balance is being provided as a
FancyProperty, so it can be used as a row.
For subclassing, implement the respective abstract
methods/properties.
"""
@property
def balance(self):
"""The current balance as a
:py:class:`~sipa.model.fancy_property.ActiveProperty`
If :attr:`has_to_pay` is False, return an empty property with
the note that the user doesn't have to pay. Else, return the
balance formatted and styled (red/green) as money and mark
this property editable.
"""
if not self.has_to_pay:
return ActiveProperty('finance_balance',
value=gettext("Beitrag in Miete inbegriffen"),
raw_value=0, empty=True)
return ActiveProperty('finance_balance',
value=format_money(self.raw_balance),
raw_value=self.raw_balance)
@property
@abstractmethod
def raw_balance(self) -> float:
"""**[Abstract]** The current balance
If :py:meth:`has_to_pay` is False, this method will not be
used implicitly.
"""
pass
@property
@abstractmethod
def has_to_pay(self) -> bool:
"""**[Abstract]** Whether the user is obliged to pay."""
pass
@property
@abstractmethod
def history(self):
"""**[Abstract]** History of payments
This method should return an iterable of a (datetime, int)
tuple.
"""
pass
@property
@abstractmethod
def last_update(self):
"""**[Abstract]** The time of the last update."""
pass
def __eq__(self, other):
return compare_all_attributes(self, other, ['raw_balance', 'has_to_pay',
'history', 'last_update'])
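# Illustrative only: a minimal concrete backend sketch (not part of sipa); the
# literal values are placeholders showing which abstract members a subclass of
# BaseFinanceInformation must provide.
class _ExampleFinanceInformation(BaseFinanceInformation):
    raw_balance = 3.5    # current balance (placeholder value)
    has_to_pay = True    # user pays separately, so `balance` renders the amount
    history = []         # iterable of (datetime, amount) tuples
    last_update = None   # datetime of the last data import (placeholder)
# With the sketch above, `_ExampleFinanceInformation().balance` would be an
# ActiveProperty carrying the amount formatted by format_money.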
|
mit
| -3,464,621,418,245,164,500 | 1,305,819,278,310,092,000 | 31.013514 | 80 | 0.604474 | false |
untom/scikit-learn
|
sklearn/decomposition/base.py
|
313
|
5647
|
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
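# Illustrative only: a sketch (not part of scikit-learn's API) checking that,
# for a fitted PCA-like estimator, get_precision() numerically inverts
# get_covariance(); the helper name is an assumption.
def _example_precision_inverts_covariance():
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    pca = PCA(n_components=3).fit(X)
    product = np.dot(pca.get_covariance(), pca.get_precision())
    assert np.allclose(product, np.eye(5), atol=1e-6)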
|
bsd-3-clause
| -4,672,431,850,505,874,000 | 791,967,570,779,361,900 | 33.858025 | 81 | 0.59359 | false |
rogerthat-platform/rogerthat-backend
|
src-test/rogerthat_tests/mobicage/bizz/test_system.py
|
1
|
1331
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from rogerthat.bizz.system import unregister_mobile
from rogerthat.bizz.user import archiveUserDataAfterDisconnect
from rogerthat.dal.friend import get_friends_map
from rogerthat.dal.profile import get_user_profile
from rogerthat.rpc import users
import mc_unittest
class Test(mc_unittest.TestCase):
def setUp(self):
mc_unittest.TestCase.setUp(self, datastore_hr_probability=1)
def test_unregister_deactivated_account(self):
user = users.get_current_user()
mobile = users.get_current_mobile()
# deactivate account
archiveUserDataAfterDisconnect(user, get_friends_map(user), get_user_profile(user), False)
unregister_mobile(user, mobile)
|
apache-2.0
| -3,112,126,741,191,089,700 | -6,791,203,611,718,813,000 | 35.972222 | 98 | 0.746807 | false |
LuqueDaniel/ninja-ide
|
ninja_ide/gui/dialogs/preferences/preferences_editor_configuration.py
|
6
|
13499
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QGroupBox
from PyQt4.QtGui import QCheckBox
from PyQt4.QtGui import QGridLayout
from PyQt4.QtGui import QSpacerItem
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QSpinBox
from PyQt4.QtGui import QComboBox
from PyQt4.QtGui import QSizePolicy
from PyQt4.QtCore import Qt
from PyQt4.QtCore import SIGNAL
from ninja_ide import translations
from ninja_ide.core import settings
from ninja_ide.gui.ide import IDE
from ninja_ide.gui.dialogs.preferences import preferences
class EditorConfiguration(QWidget):
"""EditorConfiguration widget class"""
def __init__(self, parent):
super(EditorConfiguration, self).__init__()
self._preferences, vbox = parent, QVBoxLayout(self)
# groups
group1 = QGroupBox(translations.TR_PREFERENCES_EDITOR_CONFIG_INDENT)
group2 = QGroupBox(translations.TR_PREFERENCES_EDITOR_CONFIG_MARGIN)
group3 = QGroupBox(translations.TR_LINT_DIRTY_TEXT)
group4 = QGroupBox(translations.TR_PEP8_DIRTY_TEXT)
group5 = QGroupBox(translations.TR_HIGHLIGHTER_EXTRAS)
group6 = QGroupBox(translations.TR_TYPING_ASSISTANCE)
group7 = QGroupBox(translations.TR_DISPLAY)
# groups container
container_widget_with_all_preferences = QWidget()
formFeatures = QGridLayout(container_widget_with_all_preferences)
# Indentation
hboxg1 = QHBoxLayout(group1)
hboxg1.setContentsMargins(5, 15, 5, 5)
self._spin, self._checkUseTabs = QSpinBox(), QComboBox()
self._spin.setRange(1, 10)
self._spin.setValue(settings.INDENT)
hboxg1.addWidget(self._spin)
self._checkUseTabs.addItems([
translations.TR_PREFERENCES_EDITOR_CONFIG_SPACES.capitalize(),
translations.TR_PREFERENCES_EDITOR_CONFIG_TABS.capitalize()])
self._checkUseTabs.setCurrentIndex(int(settings.USE_TABS))
hboxg1.addWidget(self._checkUseTabs)
formFeatures.addWidget(group1, 0, 0)
# Margin Line
hboxg2 = QHBoxLayout(group2)
hboxg2.setContentsMargins(5, 15, 5, 5)
self._checkShowMargin = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_SHOW_MARGIN_LINE)
self._checkShowMargin.setChecked(settings.SHOW_MARGIN_LINE)
hboxg2.addWidget(self._checkShowMargin)
self._spinMargin = QSpinBox()
self._spinMargin.setRange(50, 100)
self._spinMargin.setSingleStep(2)
self._spinMargin.setValue(settings.MARGIN_LINE)
hboxg2.addWidget(self._spinMargin)
hboxg2.addWidget(QLabel(translations.TR_CHARACTERS))
formFeatures.addWidget(group2, 0, 1)
# Display Errors
vboxDisplay = QVBoxLayout(group7)
vboxDisplay.setContentsMargins(5, 15, 5, 5)
self._checkHighlightLine = QComboBox()
self._checkHighlightLine.addItems([
translations.TR_PREFERENCES_EDITOR_CONFIG_ERROR_USE_BACKGROUND,
translations.TR_PREFERENCES_EDITOR_CONFIG_ERROR_USE_UNDERLINE])
self._checkHighlightLine.setCurrentIndex(
int(settings.UNDERLINE_NOT_BACKGROUND))
hboxDisplay1 = QHBoxLayout()
hboxDisplay1.addWidget(QLabel(translations.TR_DISPLAY_ERRORS))
hboxDisplay1.addWidget(self._checkHighlightLine)
hboxDisplay2 = QHBoxLayout()
self._checkDisplayLineNumbers = QCheckBox(
translations.TR_DISPLAY_LINE_NUMBERS)
self._checkDisplayLineNumbers.setChecked(settings.SHOW_LINE_NUMBERS)
hboxDisplay2.addWidget(self._checkDisplayLineNumbers)
vboxDisplay.addLayout(hboxDisplay1)
vboxDisplay.addLayout(hboxDisplay2)
formFeatures.addWidget(group7, 1, 0, 1, 0)
# Find Lint Errors (highlighter)
vboxg3 = QVBoxLayout(group3)
self._checkErrors = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_FIND_ERRORS)
self._checkErrors.setChecked(settings.FIND_ERRORS)
self.connect(self._checkErrors, SIGNAL("stateChanged(int)"),
self._disable_show_errors)
self._showErrorsOnLine = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_SHOW_TOOLTIP_ERRORS)
self._showErrorsOnLine.setChecked(settings.ERRORS_HIGHLIGHT_LINE)
self.connect(self._showErrorsOnLine, SIGNAL("stateChanged(int)"),
self._enable_errors_inline)
vboxg3.addWidget(self._checkErrors)
vboxg3.addWidget(self._showErrorsOnLine)
formFeatures.addWidget(group3, 2, 0)
# Find PEP8 Errors (highlighter)
vboxg4 = QVBoxLayout(group4)
vboxg4.setContentsMargins(5, 15, 5, 5)
self._checkStyle = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_SHOW_PEP8)
self._checkStyle.setChecked(settings.CHECK_STYLE)
self.connect(self._checkStyle, SIGNAL("stateChanged(int)"),
self._disable_check_style)
vboxg4.addWidget(self._checkStyle)
self._checkStyleOnLine = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_SHOW_TOOLTIP_PEP8)
self._checkStyleOnLine.setChecked(settings.CHECK_HIGHLIGHT_LINE)
self.connect(self._checkStyleOnLine, SIGNAL("stateChanged(int)"),
self._enable_check_inline)
vboxg4.addWidget(self._checkStyleOnLine)
formFeatures.addWidget(group4, 2, 1)
# Show Python3 Migration, DocStrings and Spaces (highlighter)
vboxg5 = QVBoxLayout(group5)
vboxg5.setContentsMargins(5, 15, 5, 5)
self._showMigrationTips = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_SHOW_MIGRATION)
self._showMigrationTips.setChecked(settings.SHOW_MIGRATION_TIPS)
vboxg5.addWidget(self._showMigrationTips)
self._checkForDocstrings = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_CHECK_FOR_DOCSTRINGS)
self._checkForDocstrings.setChecked(settings.CHECK_FOR_DOCSTRINGS)
vboxg5.addWidget(self._checkForDocstrings)
self._checkShowSpaces = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_SHOW_TABS_AND_SPACES)
self._checkShowSpaces.setChecked(settings.SHOW_TABS_AND_SPACES)
vboxg5.addWidget(self._checkShowSpaces)
self._checkIndentationGuide = QCheckBox(
translations.TR_SHOW_INDENTATION_GUIDE)
self._checkIndentationGuide.setChecked(settings.SHOW_INDENTATION_GUIDE)
vboxg5.addWidget(self._checkIndentationGuide)
formFeatures.addWidget(group5, 3, 0)
# End of line, Center On Scroll, Trailing space, Word wrap
vboxg6 = QVBoxLayout(group6)
vboxg6.setContentsMargins(5, 15, 5, 5)
self._checkEndOfLine = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_END_OF_LINE)
self._checkEndOfLine.setChecked(settings.USE_PLATFORM_END_OF_LINE)
vboxg6.addWidget(self._checkEndOfLine)
self._checkCenterScroll = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_CENTER_SCROLL)
self._checkCenterScroll.setChecked(settings.CENTER_ON_SCROLL)
vboxg6.addWidget(self._checkCenterScroll)
self._checkTrailing = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_REMOVE_TRAILING)
self._checkTrailing.setChecked(settings.REMOVE_TRAILING_SPACES)
vboxg6.addWidget(self._checkTrailing)
self._allowWordWrap = QCheckBox(
translations.TR_PREFERENCES_EDITOR_CONFIG_WORD_WRAP)
self._allowWordWrap.setChecked(settings.ALLOW_WORD_WRAP)
vboxg6.addWidget(self._allowWordWrap)
formFeatures.addWidget(group6, 3, 1)
# pack all the groups
vbox.addWidget(container_widget_with_all_preferences)
vbox.addItem(QSpacerItem(0, 10, QSizePolicy.Expanding,
QSizePolicy.Expanding))
self.connect(self._preferences, SIGNAL("savePreferences()"), self.save)
def _enable_check_inline(self, val):
"""Method that takes a value to enable the inline style checking"""
if val == Qt.Checked:
self._checkStyle.setChecked(True)
def _enable_errors_inline(self, val):
"""Method that takes a value to enable the inline errors checking"""
if val == Qt.Checked:
self._checkErrors.setChecked(True)
def _disable_check_style(self, val):
"""Method that takes a value to disable the inline style checking"""
if val == Qt.Unchecked:
self._checkStyleOnLine.setChecked(False)
def _disable_show_errors(self, val):
"""Method that takes a value to disable the inline errors checking"""
if val == Qt.Unchecked:
self._showErrorsOnLine.setChecked(False)
def save(self):
"""Method to save settings"""
qsettings = IDE.ninja_settings()
settings.USE_TABS = bool(self._checkUseTabs.currentIndex())
qsettings.setValue('preferences/editor/useTabs',
settings.USE_TABS)
margin_line = self._spinMargin.value()
settings.MARGIN_LINE = margin_line
settings.pep8mod_update_margin_line_length(margin_line)
qsettings.setValue('preferences/editor/marginLine', margin_line)
settings.SHOW_MARGIN_LINE = self._checkShowMargin.isChecked()
qsettings.setValue('preferences/editor/showMarginLine',
settings.SHOW_MARGIN_LINE)
settings.INDENT = self._spin.value()
qsettings.setValue('preferences/editor/indent', settings.INDENT)
endOfLine = self._checkEndOfLine.isChecked()
settings.USE_PLATFORM_END_OF_LINE = endOfLine
qsettings.setValue('preferences/editor/platformEndOfLine', endOfLine)
settings.UNDERLINE_NOT_BACKGROUND = \
bool(self._checkHighlightLine.currentIndex())
qsettings.setValue('preferences/editor/errorsUnderlineBackground',
settings.UNDERLINE_NOT_BACKGROUND)
settings.FIND_ERRORS = self._checkErrors.isChecked()
qsettings.setValue('preferences/editor/errors', settings.FIND_ERRORS)
settings.ERRORS_HIGHLIGHT_LINE = self._showErrorsOnLine.isChecked()
qsettings.setValue('preferences/editor/errorsInLine',
settings.ERRORS_HIGHLIGHT_LINE)
settings.CHECK_STYLE = self._checkStyle.isChecked()
qsettings.setValue('preferences/editor/checkStyle',
settings.CHECK_STYLE)
settings.SHOW_MIGRATION_TIPS = self._showMigrationTips.isChecked()
qsettings.setValue('preferences/editor/showMigrationTips',
settings.SHOW_MIGRATION_TIPS)
settings.CHECK_HIGHLIGHT_LINE = self._checkStyleOnLine.isChecked()
qsettings.setValue('preferences/editor/checkStyleInline',
settings.CHECK_HIGHLIGHT_LINE)
settings.CENTER_ON_SCROLL = self._checkCenterScroll.isChecked()
qsettings.setValue('preferences/editor/centerOnScroll',
settings.CENTER_ON_SCROLL)
settings.REMOVE_TRAILING_SPACES = self._checkTrailing.isChecked()
qsettings.setValue('preferences/editor/removeTrailingSpaces',
settings.REMOVE_TRAILING_SPACES)
settings.ALLOW_WORD_WRAP = self._allowWordWrap.isChecked()
qsettings.setValue('preferences/editor/allowWordWrap',
settings.ALLOW_WORD_WRAP)
settings.SHOW_TABS_AND_SPACES = self._checkShowSpaces.isChecked()
qsettings.setValue('preferences/editor/showTabsAndSpaces',
settings.SHOW_TABS_AND_SPACES)
settings.SHOW_INDENTATION_GUIDE = (
self._checkIndentationGuide.isChecked())
qsettings.setValue('preferences/editor/showIndentationGuide',
settings.SHOW_INDENTATION_GUIDE)
settings.CHECK_FOR_DOCSTRINGS = self._checkForDocstrings.isChecked()
qsettings.setValue('preferences/editor/checkForDocstrings',
settings.CHECK_FOR_DOCSTRINGS)
settings.SHOW_LINE_NUMBERS = self._checkDisplayLineNumbers.isChecked()
qsettings.setValue('preferences/editor/showLineNumbers',
settings.SHOW_LINE_NUMBERS)
if settings.USE_TABS:
settings.pep8mod_add_ignore("W191")
else:
settings.pep8mod_remove_ignore("W191")
settings.pep8mod_refresh_checks()
preferences.Preferences.register_configuration(
'EDITOR', EditorConfiguration,
translations.TR_PREFERENCES_EDITOR_CONFIGURATION,
weight=0, subsection='CONFIGURATION')
|
gpl-3.0
| -222,798,311,619,224,160 | 1,984,510,042,520,174,300 | 46.699647 | 79 | 0.682051 | false |
99designs/colorific
|
setup.py
|
1
|
1724
|
# -*- coding: utf-8 -*-
#
# setup.py
# colorific
#
"""
Package information for colorific.
"""
import os.path
import platform
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
PROJECT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__)))
README_PATH = os.path.join(PROJECT_DIR, 'README.rst')
REQUIREMENTS_PATH = os.path.join(PROJECT_DIR, 'requirements_py{0}.pip'
.format(platform.python_version_tuple()[0]))
VERSION = __import__('colorific').get_version()
install_requires = [
'Pillow>=2.6.1',
'colormath>=2.0.2',
'numpy>=1.9.0',
]
if platform.python_version_tuple() < ('3', '2'):
install_requires.append('backports.functools_lru_cache>=1.0.1')
setup(
name='colorific',
version=VERSION,
description='Automatic color palette detection',
long_description=open(README_PATH).read(),
author='Lars Yencken',
author_email='[email protected]',
url='http://github.com/99designs/colorific',
license='ISC',
packages=['colorific'],
zip_safe=False,
install_requires=install_requires,
entry_points={'console_scripts': ['colorific = colorific.script:main']},
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
isc
| 3,354,814,896,616,378,000 | 5,360,800,644,801,147,000 | 28.220339 | 77 | 0.62819 | false |
hdinsight/hue
|
desktop/core/ext-py/elementtree/elementtree/__init__.py
|
127
|
1491
|
# $Id: __init__.py 1821 2004-06-03 16:57:49Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
|
apache-2.0
| 6,866,900,078,355,550,000 | -1,603,766,600,632,260,900 | 48.7 | 70 | 0.706237 | false |
haojunyu/numpy
|
numpy/ma/mrecords.py
|
90
|
27383
|
""":mod:`numpy.ma..mrecords`
Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
where fields can be accessed as attributes.
Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
.. moduleauthor:: Pierre Gerard-Marchant
"""
from __future__ import division, absolute_import, print_function
# We should make sure that no field is called '_mask','mask','_fieldmask',
# or whatever restricted keywords. An idea would be to no bother in the
# first place, and then rename the invalid fields with a trailing
# underscore. Maybe we could just overload the parser function ?
import sys
import warnings
import numpy as np
import numpy.core.numerictypes as ntypes
from numpy.compat import basestring
from numpy import (
bool_, dtype, ndarray, recarray, array as narray
)
from numpy.core.records import (
fromarrays as recfromarrays, fromrecords as recfromrecords
)
_byteorderconv = np.core.records._byteorderconv
_typestr = ntypes._typestr
import numpy.ma as ma
from numpy.ma import (
MAError, MaskedArray, masked, nomask, masked_array, getdata,
getmaskarray, filled
)
_check_fill_value = ma.core._check_fill_value
__all__ = [
'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
'fromtextfile', 'addfield',
]
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
def _getformats(data):
"""
Returns the formats of arrays in arraylist as a comma-separated string.
"""
if hasattr(data, 'dtype'):
return ",".join([desc[1] for desc in data.dtype.descr])
formats = ''
for obj in data:
obj = np.asarray(obj)
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, ntypes.flexible):
formats += repr(obj.itemsize)
formats += ','
return formats[:-1]
def _checknames(descr, names=None):
"""
Checks that field names ``descr`` are not reserved keywords.
If this is the case, a default 'f%i' is substituted. If the argument
`names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
def _get_fieldmask(self):
mdescr = [(n, '|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False] * len(mdescr))
return fdmask
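# Illustrative usage (assumed, mirroring the module docstring): the
# `fromarrays` helper defined further below builds a MaskedRecords array whose
# fields can be read as attributes, e.g.
#   >>> rec = fromarrays([[1, 2, 3], [4.5, 5.5, 6.5]], names='x,y')
#   >>> rec.x    # the 'x' field, returned as a masked array
#   >>> rec[0]   # the first record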
class MaskedRecords(MaskedArray, object):
"""
Attributes
----------
_data : recarray
Underlying data, as a record array.
_mask : boolean array
Mask of the records. A record is masked when all its fields are
masked.
_fieldmask : boolean recarray
Record array of booleans, setting the mask of each individual field
of each record.
_fill_value : record
Filling values for each field.
"""
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False,
mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
copy=False,
**options):
self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
strides=strides, formats=formats, names=names,
titles=titles, byteorder=byteorder,
aligned=aligned,)
mdtype = ma.make_mask_descr(self.dtype)
if mask is nomask or not np.size(mask):
if not keep_mask:
self._mask = tuple([False] * len(mdtype))
else:
mask = np.array(mask, copy=copy)
if mask.shape != self.shape:
(nd, nm) = (self.size, mask.size)
if nm == 1:
mask = np.resize(mask, self.shape)
elif nm == nd:
mask = np.reshape(mask, self.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MAError(msg % (nd, nm))
copy = True
if not keep_mask:
self.__setmask__(mask)
self._sharedmask = True
else:
if mask.dtype == mdtype:
_mask = mask
else:
_mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
self._mask = _mask
return self
def __array_finalize__(self, obj):
# Make sure we have a _fieldmask by default
_mask = getattr(obj, '_mask', None)
if _mask is None:
objmask = getattr(obj, '_mask', nomask)
_dtype = ndarray.__getattribute__(self, 'dtype')
if objmask is nomask:
_mask = ma.make_mask_none(self.shape, dtype=_dtype)
else:
mdescr = ma.make_mask_descr(_dtype)
_mask = narray([tuple([m] * len(mdescr)) for m in objmask],
dtype=mdescr).view(recarray)
# Update some of the attributes
_dict = self.__dict__
_dict.update(_mask=_mask)
self._update_from(obj)
if _dict['_baseclass'] == ndarray:
_dict['_baseclass'] = recarray
return
def _getdata(self):
"""
Returns the data as a recarray.
"""
return ndarray.view(self, recarray)
_data = property(fget=_getdata)
def _getfieldmask(self):
"""
Alias to mask.
"""
return self._mask
_fieldmask = property(fget=_getfieldmask)
def __len__(self):
"""
Returns the length
"""
# We have more than one record
if self.ndim:
return len(self._data)
# We have only one record: return the nb of fields
return len(self.dtype)
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
# attr must be a fieldname
pass
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
# So far, so good
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
obj = _data.getfield(*res)
if obj.dtype.fields:
raise NotImplementedError("MaskedRecords is currently limited to"
"simple records.")
# Get some special attributes
# Reset the object's mask
hasmasked = False
_mask = _localdict.get('_mask', None)
if _mask is not None:
try:
_mask = _mask[attr]
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
obj._isfield = True
obj._mask = _mask
# Reset the field values
_fill_value = _localdict.get('_fill_value', None)
if _fill_value is not None:
try:
obj._fill_value = _fill_value[attr]
except ValueError:
obj._fill_value = None
else:
obj = obj.item()
return obj
def __setattr__(self, attr, val):
"""
Sets the attribute attr to the value val.
"""
# Should we call __setmask__ first ?
if attr in ['mask', 'fieldmask']:
self.__setmask__(val)
return
# Create a shortcut (so that we don't have to call getattr all the time)
_localdict = object.__getattribute__(self, '__dict__')
# Check whether we're creating a new field
newattr = attr not in _localdict
try:
# Is attr a generic attribute ?
ret = object.__setattr__(self, attr, val)
except:
# Not a generic attribute: exit if it's not a valid field
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
if not (attr in fielddict or attr in optinfo):
exctype, value = sys.exc_info()[:2]
raise exctype(value)
else:
# Get the list of names
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
# Check the attribute
if attr not in fielddict:
return ret
if newattr:
# We just added this one or this setattr worked on an
# internal attribute.
try:
object.__delattr__(self, attr)
except:
return ret
# Let's try to set the field
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
if val is masked:
_fill_value = _localdict['_fill_value']
if _fill_value is not None:
dval = _localdict['_fill_value'][attr]
else:
dval = val
mval = True
else:
dval = filled(val)
mval = getmaskarray(val)
obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
_localdict['_mask'].__setitem__(attr, mval)
return obj
def __getitem__(self, indx):
"""
Returns all the fields sharing the same fieldname base.
The fieldname base is either `_data` or `_mask`.
"""
_localdict = self.__dict__
_mask = ndarray.__getattribute__(self, '_mask')
_data = ndarray.view(self, _localdict['_baseclass'])
# We want a field
if isinstance(indx, basestring):
# Make sure _sharedmask is True to propagate back to _fieldmask
            # Don't use _set_mask: there are some copies being made that
            # break propagation. Don't force the mask to nomask, that wrecks
            # easy masking.
obj = _data[indx].view(MaskedArray)
obj._mask = _mask[indx]
obj._sharedmask = True
fval = _localdict['_fill_value']
if fval is not None:
obj._fill_value = fval[indx]
# Force to masked if the mask is True
if not obj.ndim and obj._mask:
return masked
return obj
# We want some elements.
# First, the data.
obj = np.array(_data[indx], copy=False).view(mrecarray)
obj._mask = np.array(_mask[indx], copy=False).view(recarray)
return obj
def __setitem__(self, indx, value):
"""
Sets the given record to value.
"""
MaskedArray.__setitem__(self, indx, value)
if isinstance(indx, basestring):
self._mask[indx] = ma.getmaskarray(value)
def __str__(self):
"""
Calculates the string representation.
"""
if self.size > 1:
mstr = ["(%s)" % ",".join([str(i) for i in s])
for s in zip(*[getattr(self, f) for f in self.dtype.names])]
return "[%s]" % ", ".join(mstr)
else:
mstr = ["%s" % ",".join([str(i) for i in s])
for s in zip([getattr(self, f) for f in self.dtype.names])]
return "(%s)" % ", ".join(mstr)
def __repr__(self):
"""
Calculates the repr representation.
"""
_names = self.dtype.names
fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
reprstr.insert(0, 'masked_records(')
reprstr.extend([fmt % (' fill_value', self.fill_value),
' )'])
return str("\n".join(reprstr))
def view(self, dtype=None, type=None):
"""
Returns a view of the mrecarray.
"""
# OK, basic copy-paste from MaskedArray.view.
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
# Here again.
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
# OK, there's the change
except TypeError:
dtype = np.dtype(dtype)
# we need to revert to MaskedArray, but keeping the possibility
# of subclasses (eg, TimeSeriesRecords), so we'll force a type
# set to the first parent
if dtype.fields is None:
basetype = self.__class__.__bases__[0]
output = self.__array__().view(dtype, basetype)
output._update_from(self)
else:
output = ndarray.view(self, dtype)
output._fill_value = None
else:
output = ndarray.view(self, dtype, type)
# Update the mask, just like in MaskedArray.view
if (getattr(output, '_mask', nomask) is not nomask):
mdtype = ma.make_mask_descr(output.dtype)
output._mask = self._mask.view(mdtype, ndarray)
output._mask.shape = output.shape
return output
def harden_mask(self):
"""
Forces the mask to hard.
"""
self._hardmask = True
def soften_mask(self):
"""
Forces the mask to soft
"""
self._hardmask = False
def copy(self):
"""
Returns a copy of the masked record.
"""
copied = self._data.copy().view(type(self))
copied._mask = self._mask.copy()
return copied
def tolist(self, fill_value=None):
"""
Return the data portion of the array as a list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to fill_value. If fill_value is None,
the corresponding entries in the output list will be ``None``.
"""
if fill_value is not None:
return self.filled(fill_value).tolist()
result = narray(self.filled().tolist(), dtype=object)
mask = narray(self._mask.tolist())
result[mask] = None
return result.tolist()
def __getstate__(self):
"""Return the internal state of the masked array.
This is for pickling.
"""
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tobytes(),
self._mask.tobytes(),
self._fill_value,
)
return state
def __setstate__(self, state):
"""
Restore the internal state of the masked array.
This is for pickling. ``state`` is typically the output of the
        ``__getstate__`` output, and is a 7-tuple:
        - a version number (currently 1)
        - a tuple giving the shape of the data
        - a typecode for the data
        - a flag indicating whether the data is stored in Fortran order
        - a binary string for the data
        - a binary string for the mask
        - the fill_value.
"""
(ver, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
self.fill_value = flv
def __reduce__(self):
"""
Return a 3-tuple for pickling a MaskedArray.
"""
return (_mrreconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
"""
Build a new MaskedArray from the information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
_mask = ndarray.__new__(ndarray, baseshape, 'b1')
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords
###############################################################################
# Constructors #
###############################################################################
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None,
fill_value=None):
"""
Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
datalist = [getdata(x) for x in arraylist]
masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
_array = recfromarrays(datalist,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._mask.flat = list(zip(*masklist))
if fill_value is not None:
_array.fill_value = fill_value
return _array
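# Illustrative sketch (not part of the original module): building a masked
# record array from two masked arrays with `fromarrays`; the field names
# 'a' and 'b' are arbitrary examples.
#
#   >>> x = ma.array([1, 2, 3], mask=[0, 1, 0])
#   >>> y = ma.array([1.0, 2.0, 3.0], mask=[0, 0, 1])
#   >>> rec = fromarrays([x, y], names='a,b')
#   >>> rec.a        # second element is masked, mirroring x
#   >>> rec._mask    # per-field mask combining both inputs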
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None,
fill_value=None, mask=nomask):
"""
Creates a MaskedRecords from a list of records.
Parameters
----------
reclist : sequence
A list of records. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None,int}, optional
Number of records. If None, ``shape`` is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
mask : {nomask, sequence}, optional.
External mask to apply on the data.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
# Grab the initial _fieldmask, if needed:
_mask = getattr(reclist, '_mask', None)
# Get the list of records.
if isinstance(reclist, ndarray):
# Make sure we don't have some hidden mask
if isinstance(reclist, MaskedArray):
reclist = reclist.filled().view(ndarray)
# Grab the initial dtype, just in case
if dtype is None:
dtype = reclist.dtype
reclist = reclist.tolist()
mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles,
aligned=aligned, byteorder=byteorder).view(mrecarray)
# Set the fill_value if needed
if fill_value is not None:
mrec.fill_value = fill_value
# Now, let's deal w/ the mask
if mask is not nomask:
mask = np.array(mask, copy=False)
maskrecordlength = len(mask.dtype)
if maskrecordlength:
mrec._mask.flat = mask
elif len(mask.shape) == 2:
mrec._mask.flat = [tuple(m) for m in mask]
else:
mrec.__setmask__(mask)
if _mask is not None:
mrec._mask[:] = _mask
return mrec
def _guessvartypes(arr):
"""
Tries to guess the dtypes of the str_ ndarray `arr`.
Guesses by testing element-wise conversion. Returns a list of dtypes.
The array is first converted to ndarray. If the array is 2D, the test
    is performed on the first line. An exception is raised if the array is
3D or more.
"""
vartypes = []
arr = np.asarray(arr)
if len(arr.shape) == 2:
arr = arr[0]
elif len(arr.shape) > 2:
raise ValueError("The array should be 2D at most!")
# Start the conversion loop.
for f in arr:
try:
int(f)
except ValueError:
try:
float(f)
except ValueError:
try:
complex(f)
except ValueError:
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
def openfile(fname):
"""
Opens the file handle of file `fname`.
"""
# A file handle
if hasattr(fname, 'readline'):
return fname
# Try to open the file and guess its type
try:
f = open(fname)
except IOError:
raise IOError("No such file: '%s'" % fname)
if f.readline()[:2] != "\\x":
f.seek(0, 0)
return f
f.close()
raise NotImplementedError("Wow, binary file")
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
varnames=None, vartypes=None):
"""
Creates a mrecarray from data stored in the file `filename`.
Parameters
----------
fname : {file name/handle}
Handle of an opened file.
delimitor : {None, string}, optional
Alphanumeric character used to separate columns in the file.
        If None, any (group of) whitespace string(s) will be used.
commentchar : {'#', string}, optional
Alphanumeric character used to mark the start of a comment.
missingchar : {'', string}, optional
String indicating missing data, and used to create the masks.
varnames : {None, sequence}, optional
Sequence of the variable names. If None, a list will be created from
the first non empty line of the file.
vartypes : {None, sequence}, optional
Sequence of the variables dtypes. If None, it will be estimated from
the first non-commented line.
Ultra simple: the varnames are in the header, one line"""
# Try to open the file.
ftext = openfile(fname)
# Get the first non-empty line as the varnames
while True:
line = ftext.readline()
firstline = line[:line.find(commentchar)].strip()
_varnames = firstline.split(delimitor)
if len(_varnames) > 1:
break
if varnames is None:
varnames = _varnames
# Get the data.
_variables = masked_array([line.strip().split(delimitor) for line in ftext
if line[0] != commentchar and len(line) > 1])
(_, nfields) = _variables.shape
ftext.close()
# Try to guess the dtype.
if vartypes is None:
vartypes = _guessvartypes(_variables[0])
else:
vartypes = [np.dtype(v) for v in vartypes]
if len(vartypes) != nfields:
        msg = "Attempting to use %i dtypes for %i fields!"
msg += " Reverting to default."
warnings.warn(msg % (len(vartypes), nfields))
vartypes = _guessvartypes(_variables[0])
# Construct the descriptor.
mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
mfillv = [ma.default_fill_value(f) for f in vartypes]
# Get the data and the mask.
# We just need a list of masked_arrays. It's easier to create it like that:
_mask = (_variables.T == missingchar)
_datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
return fromarrays(_datalist, dtype=mdescr)
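# Illustrative sketch (not part of the original module): `fromtextfile` expects a
# delimited text file whose first non-empty line holds the variable names. With a
# hypothetical file 'sample.txt' containing
#
#   id;temp;name
#   1;27.3;alpha
#   2;31.5;
#
# the call below would build a two-record mrecarray whose empty 'name' entry is
# masked (missingchar defaults to ''):
#
#   >>> mrec = fromtextfile('sample.txt', delimitor=';')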
def addfield(mrecord, newfield, newfieldname=None):
"""Adds a new field to the masked record array
Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
is None, the new field name is set to 'fi', where `i` is the number of
existing fields.
"""
_data = mrecord._data
_mask = mrecord._mask
if newfieldname is None or newfieldname in reserved_fields:
newfieldname = 'f%i' % len(_data.dtype)
newfield = ma.array(newfield)
# Get the new data.
# Create a new empty recarray
newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
newdata = recarray(_data.shape, newdtype)
    # Add the existing fields
[newdata.setfield(_data.getfield(*f), *f)
for f in _data.dtype.fields.values()]
# Add the new field
newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
newdata = newdata.view(MaskedRecords)
# Get the new mask
# Create a new empty recarray
newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
newmask = recarray(_data.shape, newmdtype)
# Add the old masks
[newmask.setfield(_mask.getfield(*f), *f)
for f in _mask.dtype.fields.values()]
# Add the mask of the new field
newmask.setfield(getmaskarray(newfield),
*newmask.dtype.fields[newfieldname])
newdata._mask = newmask
return newdata
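# Illustrative sketch (not part of the original module): extending an existing
# masked record array with one extra column via `addfield`; the names and
# values below are arbitrary examples.
#
#   >>> base = fromarrays([ma.array([1, 2, 3])], names='a')
#   >>> extended = addfield(base, ma.array([10., 20., 30.]), newfieldname='b')
#   >>> extended.b   # new float field, unmasked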
|
bsd-3-clause
| -1,049,926,491,071,613,800 | 4,948,979,864,966,768,000 | 33.400754 | 82 | 0.554687 | false |
ewjoachim/dropbox-sdk-python
|
dropbox/session.py
|
3
|
13077
|
import pkg_resources
import six
import ssl
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
_TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
# TODO(kelkabany): We probably only want to instantiate this once so that even
# if multiple Dropbox objects are instantiated, they all share the same pool.
class _SSLAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=_TRUSTED_CERT_FILE,
ssl_version=ssl.PROTOCOL_TLSv1)
def pinned_session(pool_maxsize=8):
http_adapter = _SSLAdapter(pool_connections=4,
pool_maxsize=pool_maxsize)
_session = requests.session()
_session.mount('https://', http_adapter)
return _session
"""
Deprecated: The code below is included only to support the use of the old v1
client class. It will be removed once v2 is at parity with v1. Do not use this
for any new functionality.
"""
import random
import six
import sys
import time
import urllib
try:
from urlparse import parse_qs
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
from . import rest
if six.PY3:
url_path_quote = urllib.parse.quote
url_encode = urllib.parse.urlencode
else:
url_path_quote = urllib.quote
url_encode = urllib.urlencode
class OAuthToken(object):
"""
A class representing an OAuth token. Contains two fields: ``key`` and
``secret``.
"""
def __init__(self, key, secret):
self.key = key
self.secret = secret
class BaseSession(object):
API_VERSION = 1
API_HOST = "api.dropbox.com"
WEB_HOST = "www.dropbox.com"
API_CONTENT_HOST = "api-content.dropbox.com"
API_NOTIFICATION_HOST = "api-notify.dropbox.com"
def __init__(self, consumer_key, consumer_secret, access_type="auto", locale=None, rest_client=rest.RESTClient):
"""Initialize a DropboxSession object.
Your consumer key and secret are available
at https://www.dropbox.com/developers/apps
Args:
- ``access_type``: Either 'auto' (the default), 'dropbox', or
'app_folder'. You probably don't need to specify this and should
just use the default.
- ``locale``: A locale string ('en', 'pt_PT', etc.) [optional]
The locale setting will be used to translate any user-facing error
messages that the server generates. At this time Dropbox supports
'en', 'es', 'fr', 'de', and 'ja', though we will be supporting more
languages in the future. If you send a language the server doesn't
support, messages will remain in English. Look for these translated
messages in rest.ErrorResponse exceptions as e.user_error_msg.
"""
        assert access_type in ['dropbox', 'app_folder', 'auto'], "expected access_type of 'dropbox', 'app_folder', or 'auto'"
self.consumer_creds = OAuthToken(consumer_key, consumer_secret)
self.token = None
self.request_token = None
self.root = 'sandbox' if access_type == 'app_folder' else access_type
self.locale = locale
self.rest_client = rest_client
def is_linked(self):
"""Return whether the DropboxSession has an access token attached."""
return bool(self.token)
def unlink(self):
"""Remove any attached access token from the DropboxSession."""
self.token = None
def build_path(self, target, params=None):
"""Build the path component for an API URL.
This method urlencodes the parameters, adds them
to the end of the target url, and puts a marker for the API
version in front.
Args:
- ``target``: A target url (e.g. '/files') to build upon.
- ``params``: A dictionary of parameters (name to value). [optional]
Returns:
- The path and parameters components of an API URL.
"""
if six.PY2 and isinstance(target, six.text_type):
target = target.encode("utf8")
target_path = url_path_quote(target)
params = params or {}
params = params.copy()
if self.locale:
params['locale'] = self.locale
if params:
return "/%s%s?%s" % (self.API_VERSION, target_path, url_encode(params))
else:
return "/%s%s" % (self.API_VERSION, target_path)
def build_url(self, host, target, params=None):
"""Build an API URL.
This method adds scheme and hostname to the path
returned from build_path.
Args:
- ``target``: A target url (e.g. '/files') to build upon.
- ``params``: A dictionary of parameters (name to value). [optional]
Returns:
- The full API URL.
"""
return "https://%s%s" % (host, self.build_path(target, params))
class DropboxSession(BaseSession):
def set_token(self, access_token, access_token_secret):
"""Attach an access token to the DropboxSession.
Note that the access 'token' is made up of both a token string
and a secret string.
"""
self.token = OAuthToken(access_token, access_token_secret)
def set_request_token(self, request_token, request_token_secret):
"""Attach an request token to the DropboxSession.
Note that the request 'token' is made up of both a token string
and a secret string.
"""
self.request_token = OAuthToken(request_token, request_token_secret)
def build_authorize_url(self, request_token, oauth_callback=None):
"""Build a request token authorization URL.
After obtaining a request token, you'll need to send the user to
the URL returned from this function so that they can confirm that
they want to connect their account to your app.
Args:
- ``request_token``: A request token from obtain_request_token.
- ``oauth_callback``: A url to redirect back to with the authorized
request token.
Returns:
- An authorization for the given request token.
"""
params = {'oauth_token': request_token.key,
}
if oauth_callback:
params['oauth_callback'] = oauth_callback
return self.build_url(self.WEB_HOST, '/oauth/authorize', params)
def obtain_request_token(self):
"""Obtain a request token from the Dropbox API.
This is your first step in the OAuth process. You call this to get a
request_token from the Dropbox server that you can then use with
DropboxSession.build_authorize_url() to get the user to authorize it.
After it's authorized you use this token with
DropboxSession.obtain_access_token() to get an access token.
NOTE: You should only need to do this once for each user, and then you
can store the access token for that user for later operations.
Returns:
- An :py:class:`OAuthToken` object representing the
request token Dropbox assigned to this app. Also attaches the
request token as self.request_token.
"""
self.token = None # clear any token currently on the request
url = self.build_url(self.API_HOST, '/oauth/request_token')
headers, params = self.build_access_headers('POST', url)
response = self.rest_client.POST(url, headers=headers, params=params, raw_response=True)
self.request_token = self._parse_token(response.read())
return self.request_token
def obtain_access_token(self, request_token=None):
"""Obtain an access token for a user.
After you get a request token, and then send the user to the authorize
URL, you can use the authorized request token with this method to get the
access token to use for future operations. The access token is stored on
the session object.
Args:
- ``request_token``: A request token from obtain_request_token. [optional]
The request_token should have been authorized via the
authorization url from build_authorize_url. If you don't pass
a request_token, the fallback is self.request_token, which
will exist if you previously called obtain_request_token on this
DropboxSession instance.
Returns:
- An :py:class:`OAuthToken` object with fields ``key`` and ``secret``
representing the access token Dropbox assigned to this app and
user. Also attaches the access token as self.token.
"""
request_token = request_token or self.request_token
assert request_token, "No request_token available on the session. Please pass one."
url = self.build_url(self.API_HOST, '/oauth/access_token')
headers, params = self.build_access_headers('POST', url, request_token=request_token)
response = self.rest_client.POST(url, headers=headers, params=params, raw_response=True)
self.token = self._parse_token(response.read())
return self.token
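    # Illustrative sketch (not part of the original class): a typical three-step
    # OAuth 1 flow with this session, assuming placeholder app credentials
    # 'APP_KEY'/'APP_SECRET' obtained from the Dropbox app console:
    #
    #   sess = DropboxSession('APP_KEY', 'APP_SECRET', access_type='auto')
    #   request_token = sess.obtain_request_token()
    #   authorize_url = sess.build_authorize_url(request_token)
    #   # ...send the user to authorize_url; once they approve the app...
    #   access_token = sess.obtain_access_token(request_token)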
def build_access_headers(self, method, resource_url, params=None, request_token=None):
"""Build OAuth access headers for a future request.
Args:
- ``method``: The HTTP method being used (e.g. 'GET' or 'POST').
- ``resource_url``: The full url the request will be made to.
- ``params``: A dictionary of parameters to add to what's already on the url.
Typically, this would consist of POST parameters.
Returns:
- A tuple of (header_dict, params) where header_dict is a dictionary
of header names and values appropriate for passing into dropbox.rest.RESTClient
and params is a dictionary like the one that was passed in, but augmented with
oauth-related parameters as appropriate.
"""
if params is None:
params = {}
else:
params = params.copy()
oauth_params = {
'oauth_consumer_key' : self.consumer_creds.key,
'oauth_timestamp' : self._generate_oauth_timestamp(),
'oauth_nonce' : self._generate_oauth_nonce(),
'oauth_version' : self._oauth_version(),
}
token = request_token if request_token is not None else self.token
if token:
oauth_params['oauth_token'] = token.key
self._oauth_sign_request(oauth_params, self.consumer_creds, token)
headers = {
'Authorization':
'OAuth %s' % ','.join(
'%s="%s"' % (k, v) for k, v in six.iteritems(oauth_params))}
return headers, params
@classmethod
def _oauth_sign_request(cls, params, consumer_pair, token_pair):
params.update({'oauth_signature_method' : 'PLAINTEXT',
'oauth_signature' : ('%s&%s' % (consumer_pair.secret, token_pair.secret)
if token_pair is not None else
'%s&' % (consumer_pair.secret,))})
@classmethod
def _generate_oauth_timestamp(cls):
return int(time.time())
@classmethod
def _generate_oauth_nonce(cls, length=8):
return ''.join([str(random.randint(0, 9)) for i in range(length)])
@classmethod
def _oauth_version(cls):
return '1.0'
@classmethod
def _parse_token(cls, s):
if not s:
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not params:
raise ValueError("Invalid parameter string: %r" % s)
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
return OAuthToken(key, secret)
# Don't use this class directly.
class DropboxOAuth2Session(BaseSession):
def __init__(self, oauth2_access_token, locale, rest_client=rest.RESTClient):
super(DropboxOAuth2Session, self).__init__("", "", "auto", locale=locale, rest_client=rest_client)
self.access_token = oauth2_access_token
def build_access_headers(self, method, resource_url, params=None, token=None):
assert token is None
headers = {"Authorization": "Bearer " + self.access_token}
return headers, params
|
mit
| 5,351,479,565,004,125,000 | -8,950,876,500,828,756,000 | 36.685879 | 116 | 0.613826 | false |
hutchison/bp_mgmt
|
bp_cupid/views/Student.py
|
1
|
2805
|
from django.shortcuts import render, get_object_or_404
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, user_passes_test
from django.views.generic import View
from ..models import (
Student,
Gewicht,
Platz,
Zeitraum,
)
class StudentDetail(View):
template_name = 'bp_cupid/student.html'
@method_decorator(login_required)
@method_decorator(user_passes_test(lambda u: u.is_staff))
def get(self, request, mat_nr):
"""
        Displays the weights for all practices of the selected student.
"""
s = get_object_or_404(Student, mat_nr=mat_nr)
akt_verw_zr = request.user.mitarbeiter.akt_verw_zeitraum
aktuelle_zeitraeume = Zeitraum.objects.filter(
block__verwaltungszeitraum=akt_verw_zr,
)
gewichte = Gewicht.objects.filter(
student__mat_nr=mat_nr
).prefetch_related('praxis__freie_zeitraeume').order_by('-wert')
try:
platz = Platz.objects.select_related('praxis').get(student=s)
except Platz.DoesNotExist:
platz = None
context = {
'student': s,
'gewichte': gewichte,
'platz': platz,
'aktuelle_zeitraeume': aktuelle_zeitraeume,
}
return render(request, self.template_name, context)
class StudentList(View):
template_name = 'bp_cupid/studenten.html'
@method_decorator(login_required)
@method_decorator(user_passes_test(lambda u: u.is_staff))
def get(self, request):
"""
        Displays all students in a table sorted by last name.
"""
akt_verw_zr = request.user.mitarbeiter.akt_verw_zeitraum
context = {
'studenten': Student.objects.filter(
verwaltungszeitraum=akt_verw_zr
).select_related(
'platz'
).prefetch_related(
'landkreise',
'bevorzugte_praxen',
).order_by('name'),
}
anz_studis = Student.objects.filter(
verwaltungszeitraum=akt_verw_zr
).count()
if anz_studis:
anz_studis_mit_fragebogen = Student.objects.filter(
verwaltungszeitraum=akt_verw_zr,
hat_fragebogen_ausgefuellt=True,
).count()
rel_fragebogen = round(
100 * anz_studis_mit_fragebogen / anz_studis, 1
)
context.update(
{
'anz_studis': anz_studis,
'anz_studis_mit_fragebogen': anz_studis_mit_fragebogen,
'rel_fragebogen': rel_fragebogen,
}
)
return render(request, self.template_name, context)
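# Illustrative note (not part of the original module): these class-based views
# would typically be wired up in a urls.py; the patterns and names below are
# assumptions for illustration only.
#
#   url(r'^studenten/$', StudentList.as_view(), name='studenten'),
#   url(r'^student/(?P<mat_nr>\d+)/$', StudentDetail.as_view(), name='student'),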
|
agpl-3.0
| 1,525,401,782,305,289,200 | -4,370,053,536,802,446,000 | 29.150538 | 75 | 0.578103 | false |
dmargala/qusp
|
examples/compare_delta.py
|
1
|
7364
|
#!/usr/bin/env python
import argparse
import numpy as np
import numpy.ma as ma
import h5py
import qusp
import matplotlib.pyplot as plt
import scipy.interpolate
import fitsio
class DeltaLOS(object):
def __init__(self, thing_id):
path = '/data/lya/deltas/delta-%d.fits' % thing_id
hdulist = fitsio.FITS(path, mode=fitsio.READONLY)
self.pmf = hdulist[1].read_header()['pmf']
self.loglam = hdulist[1]['loglam'][:]
self.wave = np.power(10.0, self.loglam)
self.delta = hdulist[1]['delta'][:]
self.weight = hdulist[1]['weight'][:]
self.cont = hdulist[1]['cont'][:]
self.msha = hdulist[1]['msha'][:]
self.mabs = hdulist[1]['mabs'][:]
self.ivar = hdulist[1]['ivar'][:]
self.cf = self.cont*self.msha*self.mabs
self.flux = (1+self.delta)*self.cf
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
## targets to fit
parser.add_argument("--name", type=str, default=None,
help="target list")
parser.add_argument("--gamma", type=float, default=3.8,
help="LSS growth and redshift evolution of mean absorption gamma")
parser.add_argument("--index", type=int, default=1000,
help="target index")
parser.add_argument("--pmf", type=str, default=None,
help="target plate-mjd-fiber string")
args = parser.parse_args()
print 'Loading forest data...'
# import data
skim = h5py.File(args.name+'.hdf5', 'r')
if args.pmf:
plate, mjd, fiber = [int(val) for val in args.pmf.split('-')]
index = np.where((skim['meta']['plate'] == plate) & (skim['meta']['mjd'] == mjd) & (skim['meta']['fiber'] == fiber))[0][0]
else:
index = args.index
flux = np.ma.MaskedArray(skim['flux'][index], mask=skim['mask'][index])
ivar = np.ma.MaskedArray(skim['ivar'][index], mask=skim['mask'][index])
loglam = skim['loglam'][:]
wave = np.power(10.0, loglam)
z = skim['z'][index]
norm = skim['norm'][index]
meta = skim['meta'][index]
linear_continuum = h5py.File(args.name+'-linear-continuum.hdf5', 'r')
a = linear_continuum['params_a'][index]
b = linear_continuum['params_b'][index]
continuum = linear_continuum['continuum']
continuum_wave = linear_continuum['continuum_wave']
continuum_interp = scipy.interpolate.UnivariateSpline(continuum_wave, continuum, ext=1, s=0)
abs_alpha = linear_continuum.attrs['abs_alpha']
abs_beta = linear_continuum.attrs['abs_beta']
forest_wave_ref = (1+z)*linear_continuum.attrs['forest_wave_ref']
wave_lya = linear_continuum.attrs['wave_lya']
forest_pixel_redshifts = wave/wave_lya - 1
abs_coefs = abs_alpha*np.power(1+forest_pixel_redshifts, abs_beta)
print 'flux 1280 Ang: %.2f' % norm
print 'fit param a: %.2f' % a
print 'fit param b: %.2f' % b
def model_flux(a, b):
return a*np.power(wave/forest_wave_ref, b)*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)
def chisq(p):
mflux = model_flux(p[0], p[1])
res = flux - mflux
return ma.sum(res*res*ivar)/ma.sum(ivar)
from scipy.optimize import minimize
result = minimize(chisq, (a, b))
a,b = result.x
print 'fit param a: %.2f' % a
print 'fit param b: %.2f' % b
# rest and obs refer to pixel grid
print 'Estimating deltas in forest frame...'
mflux = model_flux(a,b)
delta_flux = flux/mflux - 1.0
delta_ivar = ivar*mflux*mflux
forest_min_z = linear_continuum.attrs['forest_min_z']
forest_max_z = linear_continuum.attrs['forest_max_z']
forest_dz = 0.1
forest_z_bins = np.arange(forest_min_z, forest_max_z + forest_dz, forest_dz)
print 'Adjusting weights for pipeline variance and LSS variance...'
var_lss = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.05 + 0.06*(forest_z_bins - 2.0)**2, s=0)
var_pipe_scale = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.7 + 0.2*(forest_z_bins - 2.0)**2, s=0)
delta_weight = delta_ivar*var_pipe_scale(forest_pixel_redshifts)
delta_weight = delta_weight/(1 + delta_weight*var_lss(forest_pixel_redshifts))
thing_id = meta['thing_id']
pmf = '%s-%s-%s' % (meta['plate'],meta['mjd'],meta['fiber'])
los = DeltaLOS(thing_id)
my_msha = norm*a*np.power(wave/forest_wave_ref, b)
my_wave = wave
my_flux = norm*flux
my_cf = my_msha*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)
my_ivar = ivar/(norm*norm)
my_delta = delta_flux
my_weight = delta_weight
# mean_ratio = np.average(my_msha*continuum)/ma.average(los.msha*los.cont)
# print mean_ratio
plt.figure(figsize=(12,4))
plt.plot(my_wave, my_flux, color='gray')
my_dflux = ma.power(my_ivar, -0.5)
plt.fill_between(my_wave, my_flux - my_dflux, my_flux + my_dflux, color='gray', alpha=0.5)
plt.plot(my_wave, my_msha*continuum_interp(wave/(1+z)), label='My continuum', color='blue')
plt.plot(los.wave, los.cont, label='Busca continuum', color='red')
plt.plot(my_wave, my_cf, label='My cf', color='green')
plt.plot(los.wave, los.cf, label='Busca cf', color='orange')
plt.legend()
plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Observed Flux')
plt.xlim(los.wave[[0,-1]])
plt.savefig(args.name+'-example-flux.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,4))
my_delta_sigma = ma.power(delta_weight, -0.5)
# plt.fill_between(my_wave, my_delta - my_delta_sigma, my_delta + my_delta_sigma, color='blue', alpha=0.1, label='My Delta')
plt.scatter(my_wave, my_delta, color='blue', marker='+', label='My Delta')
plt.plot(my_wave, +my_delta_sigma, color='blue', ls=':')
plt.plot(my_wave, -my_delta_sigma, color='blue', ls=':')
los_delta_sigma = ma.power(los.weight, -0.5)
# plt.fill_between(los.wave, los.delta - los_delta_sigma, los.delta + los_delta_sigma, color='red', alpha=01, label='Busca Delta')
plt.scatter(los.wave, los.delta, color='red', marker='+', label='Busca Delta')
plt.plot(los.wave, +los_delta_sigma, color='red', ls=':')
plt.plot(los.wave, -los_delta_sigma, color='red', ls=':')
my_lss_sigma = np.sqrt(var_lss(forest_pixel_redshifts))
plt.plot(my_wave, +my_lss_sigma, color='black', ls='--')
plt.plot(my_wave, -my_lss_sigma, color='black', ls='--')
# my_sn_sigma = np.sqrt(np.power(1 + forest_pixel_redshifts, 0.5*abs_beta))/10
# plt.plot(my_wave, +my_sn_sigma, color='orange', ls='--')
# plt.plot(my_wave, -my_sn_sigma, color='orange', ls='--')
# import matplotlib.patches as mpatches
#
# blue_patch = mpatches.Patch(color='blue', alpha=0.3, label='My Delta')
# red_patch = mpatches.Patch(color='red', alpha=0.3, label='Busca Delta')
# plt.legend(handles=[blue_patch,red_patch])
plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
plt.ylim(-2,2)
plt.xlim(los.wave[[0,-1]])
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Delta')
plt.legend()
plt.savefig(args.name+'-example-delta.png', dpi=100, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
|
mit
| -3,315,522,396,865,362,400 | 9,137,039,929,900,042,000 | 36.380711 | 134 | 0.623303 | false |
edxnercel/edx-platform
|
lms/djangoapps/lti_provider/users.py
|
80
|
5166
|
"""
LTI user management functionality. This module reconciles the two identities
that an individual has in the campus LMS platform and on edX.
"""
import string
import random
import uuid
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError
from lti_provider.models import LtiUser
from student.models import UserProfile
def authenticate_lti_user(request, lti_user_id, lti_consumer):
"""
Determine whether the user specified by the LTI launch has an existing
account. If not, create a new Django User model and associate it with an
LtiUser object.
If the currently logged-in user does not match the user specified by the LTI
launch, log out the old user and log in the LTI identity.
"""
try:
lti_user = LtiUser.objects.get(
lti_user_id=lti_user_id,
lti_consumer=lti_consumer
)
except LtiUser.DoesNotExist:
# This is the first time that the user has been here. Create an account.
lti_user = create_lti_user(lti_user_id, lti_consumer)
if not (request.user.is_authenticated() and
request.user == lti_user.edx_user):
# The user is not authenticated, or is logged in as somebody else.
# Switch them to the LTI user
switch_user(request, lti_user, lti_consumer)
def create_lti_user(lti_user_id, lti_consumer):
"""
Generate a new user on the edX platform with a random username and password,
    and associate that account with the LTI identity.
"""
edx_password = str(uuid.uuid4())
created = False
while not created:
try:
edx_user_id = generate_random_edx_username()
edx_email = "{}@{}".format(edx_user_id, settings.LTI_USER_EMAIL_DOMAIN)
edx_user = User.objects.create_user(
username=edx_user_id,
password=edx_password,
email=edx_email,
)
# A profile is required if PREVENT_CONCURRENT_LOGINS flag is set.
# TODO: We could populate user information from the LTI launch here,
# but it's not necessary for our current uses.
edx_user_profile = UserProfile(user=edx_user)
edx_user_profile.save()
created = True
except IntegrityError:
# The random edx_user_id wasn't unique. Since 'created' is still
# False, we will retry with a different random ID.
pass
lti_user = LtiUser(
lti_consumer=lti_consumer,
lti_user_id=lti_user_id,
edx_user=edx_user
)
lti_user.save()
return lti_user
def switch_user(request, lti_user, lti_consumer):
"""
Log out the current user, and log in using the edX identity associated with
the LTI ID.
"""
edx_user = authenticate(
username=lti_user.edx_user.username,
lti_user_id=lti_user.lti_user_id,
lti_consumer=lti_consumer
)
if not edx_user:
# This shouldn't happen, since we've created edX accounts for any LTI
# users by this point, but just in case we can return a 403.
raise PermissionDenied()
login(request, edx_user)
def generate_random_edx_username():
"""
Create a valid random edX user ID. An ID is at most 30 characters long, and
can contain upper and lowercase letters and numbers.
    :return: the randomly generated username string.
"""
allowable_chars = string.ascii_letters + string.digits
username = ''
for _index in range(30):
username = username + random.SystemRandom().choice(allowable_chars)
return username
class LtiBackend(object):
"""
A Django authentication backend that authenticates users via LTI. This
backend will only return a User object if it is associated with an LTI
identity (i.e. the user was created by the create_lti_user method above).
"""
def authenticate(self, username=None, lti_user_id=None, lti_consumer=None):
"""
Try to authenticate a user. This method will return a Django user object
if a user with the corresponding username exists in the database, and
if a record that links that user with an LTI user_id field exists in
the LtiUser collection.
If such a user is not found, the method returns None (in line with the
authentication backend specification).
"""
try:
edx_user = User.objects.get(username=username)
except User.DoesNotExist:
return None
try:
LtiUser.objects.get(
edx_user_id=edx_user.id,
lti_user_id=lti_user_id,
lti_consumer=lti_consumer
)
except LtiUser.DoesNotExist:
return None
return edx_user
def get_user(self, user_id):
"""
Return the User object for a user that has already been authenticated by
this backend.
"""
try:
return User.objects.get(id=user_id)
except User.DoesNotExist:
return None
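# Illustrative note (not part of the original module): for Django to consult
# LtiBackend during authenticate(), it would typically be listed in the project
# settings; the dotted path below is an assumption for illustration only.
#
#   AUTHENTICATION_BACKENDS = (
#       'lti_provider.users.LtiBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   )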
|
agpl-3.0
| 3,144,029,392,475,713 | 3,472,936,722,072,727,600 | 33.211921 | 83 | 0.641115 | false |
botchat/botclassifier
|
pythonbotchat/cleverbot/cleverbot_chat.py
|
1
|
1770
|
"""
A very simple Selenium-based script that chats with Cleverbot and generates a log of the conversation.
"""
import time
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def botcollectlog(url,name):
browser = webdriver.Firefox()
browser.get(url)
name_of_log = datetime.now().strftime(name+"_%Y_%m_%d_%H_%M_%S.log")
writelog = open(name_of_log,'w')
    # The list below will later be generated from a database; for now it is static.
    # The list should have at least 20-25 questions.
list_q = ["how are you?" ,"are you good thinker?", "well me absolutly fine ","yes","i like know more about you"]
#submit = browser.find_element_by_css_selector("input.bb#sayit")
#submit.click()
#submit = browser.find_element_by_css_selector(".obscure.extend.show")
#submit.click()
for line in list_q:
input = browser.find_element_by_id("stimulus")
input.send_keys(line)
time.sleep(2)
submit = browser.find_element_by_css_selector("input.bb#sayit")
submit.click()
writelog.write("["+time.strftime("%H:%M:%S:%s")+"]code: "+line+"\n")
wait = WebDriverWait(browser, 40)
element = wait.until(EC.presence_of_element_located((By.ID, "snipTextIcon")))
text = browser.find_element_by_id("typArea")
while text.text.strip() == "":
time.sleep(1)
text = browser.find_element_by_id("typArea")
writelog.write("["+time.strftime("%H:%M:%S:%s")+"]bot: "+text.text+"\n")
writelog.close()
if __name__ == "__main__":
url = "http://www.cleverbot.com/"
name = "cleverbot"
botcollectlog(url,name)
|
mit
| -7,363,515,608,959,872,000 | -109,909,304,416,913,660 | 42.170732 | 118 | 0.655932 | false |
dbbhattacharya/kitsune
|
kitsune/users/migrations/0002_move_nbNO_to_no_locale.py
|
4
|
6682
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Switch all 'nb-NO' users to 'no'."""
orm.Profile.objects.filter(locale='nb-NO').update(locale='no')
def backwards(self, orm):
"""Switch all 'no' users to 'nb-NO'."""
orm.Profile.objects.filter(locale='no').update(locale='nb-NO')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'users.emailchange': {
'Meta': {'object_name': 'EmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'irc_handle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'locale': ('kitsune.sumo.models.LocaleField', [], {'default': "'en-US'", 'max_length': '7'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'public_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'users.registrationprofile': {
'Meta': {'object_name': 'RegistrationProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'users.setting': {
'Meta': {'unique_together': "(('user', 'name'),)", 'object_name': 'Setting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settings'", 'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
}
}
complete_apps = ['users']
symmetrical = True
|
bsd-3-clause
| -1,529,976,200,732,876,800 | 4,039,983,066,608,437,000 | 70.849462 | 182 | 0.542502 | false |
gtmanfred/livestreamer
|
src/livestreamer/plugins/common_swf.py
|
38
|
1135
|
from collections import namedtuple
from io import BytesIO
from livestreamer.utils import swfdecompress
from livestreamer.packages.flashmedia.types import U16LE, U32LE
__all__ = ["parse_swf"]
Rect = namedtuple("Rect", "data")
Tag = namedtuple("Tag", "type data")
SWF = namedtuple("SWF", "frame_size frame_rate frame_count tags")
def read_rect(fd):
header = ord(fd.read(1))
nbits = header >> 3
nbytes = int(((5 + 4 * nbits) + 7) / 8)
data = fd.read(nbytes - 1)
return Rect(data)
def read_tag(fd):
header = U16LE.read(fd)
tag_type = header >> 6
tag_length = header & 0x3f
if tag_length == 0x3f:
tag_length = U32LE.read(fd)
tag_data = fd.read(tag_length)
return Tag(tag_type, tag_data)
def read_tags(fd):
while True:
try:
yield read_tag(fd)
except IOError:
break
def parse_swf(data):
data = swfdecompress(data)
fd = BytesIO(data[8:])
frame_size = read_rect(fd)
frame_rate = U16LE.read(fd)
frame_count = U16LE.read(fd)
tags = list(read_tags(fd))
return SWF(frame_size, frame_rate, frame_count, tags)
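# Illustrative sketch (not part of the original module): parsing a SWF blob and
# inspecting the result, assuming `data` holds the raw bytes of a SWF file read
# elsewhere (compressed input is handled by swfdecompress):
#
#   swf = parse_swf(data)
#   print(swf.frame_count, len(swf.tags))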
|
bsd-2-clause
| -5,398,605,589,902,211,000 | -1,354,328,877,235,889,200 | 20.826923 | 65 | 0.623789 | false |
NicoloPernigo/portia
|
slybot/slybot/linkextractor/xml.py
|
15
|
1521
|
"""
Link extraction for auto scraping
"""
from scrapy.link import Link
from scrapy.selector import Selector
from slybot.linkextractor.base import BaseLinkExtractor
class XmlLinkExtractor(BaseLinkExtractor):
"""Link extractor for XML sources"""
def __init__(self, xpath, **kwargs):
self.remove_namespaces = kwargs.pop('remove_namespaces', False)
super(XmlLinkExtractor, self).__init__(**kwargs)
self.xpath = xpath
def _extract_links(self, response):
type = 'html'
if response.body_as_unicode().strip().startswith('<?xml version='):
type = 'xml'
xxs = Selector(response, type=type)
if self.remove_namespaces:
xxs.remove_namespaces()
for url in xxs.xpath(self.xpath).extract():
yield Link(url.encode(response.encoding))
class RssLinkExtractor(XmlLinkExtractor):
"""Link extraction from RSS feeds"""
def __init__(self, **kwargs):
super(RssLinkExtractor, self).__init__("//item/link/text()", **kwargs)
class SitemapLinkExtractor(XmlLinkExtractor):
"""Link extraction for sitemap.xml feeds"""
def __init__(self, **kwargs):
kwargs['remove_namespaces'] = True
super(SitemapLinkExtractor, self).__init__("//urlset/url/loc/text() | //sitemapindex/sitemap/loc/text()", **kwargs)
class AtomLinkExtractor(XmlLinkExtractor):
def __init__(self, **kwargs):
kwargs['remove_namespaces'] = True
super(AtomLinkExtractor, self).__init__("//link/@href", **kwargs)
|
bsd-3-clause
| 5,933,231,592,115,459,000 | -6,394,558,149,120,547,000 | 37.025 | 123 | 0.65286 | false |
hakehuang/rt-thread
|
components/external/freetype/src/tools/glnames.py
|
360
|
105239
|
#!/usr/bin/env python
#
#
# FreeType 2 glyph name builder
#
# Copyright 1996-2000, 2003, 2005, 2007, 2008, 2011 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""\
usage: %s <output-file>
This python script generates the glyph names tables defined in the
`psnames' module.
Its single argument is the name of the header file to be created.
"""
import sys, string, struct, re, os.path
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
#
# See
#
# http://fonts.apple.com/TTRefMan/RM06/Chap6post.html
#
# for the official list.
#
mac_standard_names = \
[
# 0
".notdef", ".null", "nonmarkingreturn", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent", "ampersand",
# 10
"quotesingle", "parenleft", "parenright", "asterisk", "plus",
"comma", "hyphen", "period", "slash", "zero",
# 20
"one", "two", "three", "four", "five",
"six", "seven", "eight", "nine", "colon",
# 30
"semicolon", "less", "equal", "greater", "question",
"at", "A", "B", "C", "D",
# 40
"E", "F", "G", "H", "I",
"J", "K", "L", "M", "N",
# 50
"O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X",
# 60
"Y", "Z", "bracketleft", "backslash", "bracketright",
"asciicircum", "underscore", "grave", "a", "b",
# 70
"c", "d", "e", "f", "g",
"h", "i", "j", "k", "l",
# 80
"m", "n", "o", "p", "q",
"r", "s", "t", "u", "v",
# 90
"w", "x", "y", "z", "braceleft",
"bar", "braceright", "asciitilde", "Adieresis", "Aring",
# 100
"Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
"aacute", "agrave", "acircumflex", "adieresis", "atilde",
# 110
"aring", "ccedilla", "eacute", "egrave", "ecircumflex",
"edieresis", "iacute", "igrave", "icircumflex", "idieresis",
# 120
"ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
"otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
# 130
"dagger", "degree", "cent", "sterling", "section",
"bullet", "paragraph", "germandbls", "registered", "copyright",
# 140
"trademark", "acute", "dieresis", "notequal", "AE",
"Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
# 150
"yen", "mu", "partialdiff", "summation", "product",
"pi", "integral", "ordfeminine", "ordmasculine", "Omega",
# 160
"ae", "oslash", "questiondown", "exclamdown", "logicalnot",
"radical", "florin", "approxequal", "Delta", "guillemotleft",
# 170
"guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
"Otilde", "OE", "oe", "endash", "emdash",
# 180
"quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
"lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
# 190
"guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
"periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
"Acircumflex",
# 200
"Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
"Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
# 210
"apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
"dotlessi", "circumflex", "tilde", "macron", "breve",
# 220
"dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
"caron", "Lslash", "lslash", "Scaron", "scaron",
# 230
"Zcaron", "zcaron", "brokenbar", "Eth", "eth",
"Yacute", "yacute", "Thorn", "thorn", "minus",
# 240
"multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
"onequarter", "threequarters", "franc", "Gbreve", "gbreve",
# 250
"Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
"Ccaron", "ccaron", "dcroat"
]
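# In a TrueType `post' table of format 2.0, each glyph stores a name index:
# indices below 258 refer directly to the Macintosh standard names above,
# while larger indices select names embedded in the font itself.  The helper
# below is only an illustrative sketch of that convention and is not part of
# the generator:
def mac_post_name( index, font_names = () ):
    "Resolve a `post' format 2.0 name index to a glyph name (sketch)."
    if index < len( mac_standard_names ):
        return mac_standard_names[index]
    # indices >= 258 select entries from the font's own name array
    return font_names[index - len( mac_standard_names )]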
# The list of standard `SID' glyph names. For the official list,
# see Annex A of the document at
#
# http://partners.adobe.com/public/developer/en/font/5176.CFF.pdf .
#
sid_standard_names = \
[
# 0
".notdef", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright", "parenleft",
# 10
"parenright", "asterisk", "plus", "comma", "hyphen",
"period", "slash", "zero", "one", "two",
# 20
"three", "four", "five", "six", "seven",
"eight", "nine", "colon", "semicolon", "less",
# 30
"equal", "greater", "question", "at", "A",
"B", "C", "D", "E", "F",
# 40
"G", "H", "I", "J", "K",
"L", "M", "N", "O", "P",
# 50
"Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z",
# 60
"bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
"quoteleft", "a", "b", "c", "d",
# 70
"e", "f", "g", "h", "i",
"j", "k", "l", "m", "n",
# 80
"o", "p", "q", "r", "s",
"t", "u", "v", "w", "x",
# 90
"y", "z", "braceleft", "bar", "braceright",
"asciitilde", "exclamdown", "cent", "sterling", "fraction",
# 100
"yen", "florin", "section", "currency", "quotesingle",
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
# 110
"fl", "endash", "dagger", "daggerdbl", "periodcentered",
"paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
# 120
"guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
"acute", "circumflex", "tilde", "macron", "breve",
# 130
"dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
"ogonek", "caron", "emdash", "AE", "ordfeminine",
# 140
"Lslash", "Oslash", "OE", "ordmasculine", "ae",
"dotlessi", "lslash", "oslash", "oe", "germandbls",
# 150
"onesuperior", "logicalnot", "mu", "trademark", "Eth",
"onehalf", "plusminus", "Thorn", "onequarter", "divide",
# 160
"brokenbar", "degree", "thorn", "threequarters", "twosuperior",
"registered", "minus", "eth", "multiply", "threesuperior",
# 170
"copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
# 180
"Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
"Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
# 190
"Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
"Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
# 200
"aacute", "acircumflex", "adieresis", "agrave", "aring",
"atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
# 210
"egrave", "iacute", "icircumflex", "idieresis", "igrave",
"ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
# 220
"otilde", "scaron", "uacute", "ucircumflex", "udieresis",
"ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
# 230
"Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
"Acutesmall",
"parenleftsuperior", "parenrightsuperior", "twodotenleader",
"onedotenleader", "zerooldstyle",
# 240
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
"fiveoldstyle",
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
"commasuperior",
# 250
"threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
"bsuperior",
"centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
# 260
"msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
"tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
# 270
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
"Asmall",
"Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
# 280
"Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
"Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
# 290
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
"Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
# 300
"colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
"centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
"Dieresissmall",
# 310
"Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
"hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
"questiondownsmall",
# 320
"oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
"twothirds", "zerosuperior", "foursuperior", "fivesuperior",
"sixsuperior",
# 330
"sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
"oneinferior",
"twoinferior", "threeinferior", "fourinferior", "fiveinferior",
"sixinferior",
# 340
"seveninferior", "eightinferior", "nineinferior", "centinferior",
"dollarinferior",
"periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
"Acircumflexsmall",
# 350
"Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
"Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
"Igravesmall",
# 360
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
"Ntildesmall",
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
"Odieresissmall",
# 370
"OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
"Ucircumflexsmall",
"Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
"001.000",
# 380
"001.001", "001.002", "001.003", "Black", "Bold",
"Book", "Light", "Medium", "Regular", "Roman",
# 390
"Semibold"
]
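# In a CFF font, SIDs 0-390 index the standard strings above directly, while
# SIDs from 391 upwards index the font's own String INDEX.  The function
# below is an illustrative sketch of that rule only; `custom_strings' stands
# for a hypothetical list of strings read from the font and is not defined
# anywhere in this script:
def sid_to_name( sid, custom_strings = () ):
    "Map a CFF SID to its string (illustrative sketch)."
    if sid < len( sid_standard_names ):
        return sid_standard_names[sid]
    return custom_strings[sid - len( sid_standard_names )]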
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_standard_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 96, 97, 98, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 109, 110, 0, 111, 112, 113,
114, 0, 115, 116, 117, 118, 119, 120, 121, 122,
0, 123, 0, 124, 125, 126, 127, 128, 129, 130,
131, 0, 132, 133, 0, 134, 135, 136, 137, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 138, 0, 139, 0, 0,
0, 0, 140, 141, 142, 143, 0, 0, 0, 0,
0, 144, 0, 0, 0, 145, 0, 0, 146, 147,
148, 149, 0, 0, 0, 0
]
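# How this table is meant to be read: a character code indexes
# `t1_standard_encoding', and the resulting value indexes
# `sid_standard_names', with 0 marking codes that have no encoded glyph.
# The function below is only an illustrative sketch, not part of the
# generator:
def t1_standard_glyph_name( code ):
    "Return the glyph name encoded at `code', or None (sketch)."
    sid = t1_standard_encoding[code]
    if sid == 0:
        return None
    return sid_standard_names[sid]
# e.g. t1_standard_glyph_name( 0x41 ) == "A"
#      t1_standard_glyph_name( 0xA1 ) == "exclamdown"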
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_expert_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 229, 230, 0, 231, 232, 233, 234,
235, 236, 237, 238, 13, 14, 15, 99, 239, 240,
241, 242, 243, 244, 245, 246, 247, 248, 27, 28,
249, 250, 251, 252, 0, 253, 254, 255, 256, 257,
0, 0, 0, 258, 0, 0, 259, 260, 261, 262,
0, 0, 263, 264, 265, 0, 266, 109, 110, 267,
268, 269, 0, 270, 271, 272, 273, 274, 275, 276,
277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
297, 298, 299, 300, 301, 302, 303, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 304, 305, 306, 0, 0, 307, 308, 309, 310,
311, 0, 312, 0, 0, 313, 0, 0, 314, 315,
0, 0, 316, 317, 318, 0, 0, 0, 158, 155,
163, 319, 320, 321, 322, 323, 324, 325, 0, 0,
326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
373, 374, 375, 376, 377, 378
]
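# The expert encoding is read the same way.  As a further illustration
# (again not part of the generator), an encoding table can be expanded into
# a full 256-entry list of glyph names; with the expert encoding, code 0x21
# then comes out as "exclamsmall" instead of "exclam":
def expand_encoding( encoding ):
    "Expand an encoding table into a list of 256 glyph names (sketch)."
    names = []
    for sid in encoding:
        if sid:
            names.append( sid_standard_names[sid] )
        else:
            names.append( ".notdef" )
    return names
# expert_names = expand_encoding( t1_expert_encoding )
# expert_names[0x21] == "exclamsmall"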
# This data has been taken literally from the files `glyphlist.txt'
# and `zapfdingbats.txt' version 2.0, Sept 2002. It is available from
#
# http://sourceforge.net/adobe/aglfn/
#
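# Each record in the data below has the form `glyphname;CODE' or
# `glyphname;CODE1 CODE2 ...', where the codes are uppercase hexadecimal
# Unicode values.  The function below is only a minimal illustrative sketch
# of how such records could be parsed; it is not part of the original
# script:
def parse_glyph_list( data ):
    "Parse `name;XXXX [YYYY ...]' records into a name -> codes dict (sketch)."
    mapping = {}
    for line in data.splitlines():
        if not line:
            continue
        name, codes = line.split( ";" )
        mapping[name] = tuple( [int( c, 16 ) for c in codes.split()] )
    return mapping
# e.g. parse_glyph_list( adobe_glyph_list )["AE"] == ( 0x00C6, )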
adobe_glyph_list = """\
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
a100;275E
a101;2761
a102;2762
a103;2763
a104;2764
a105;2710
a106;2765
a107;2766
a108;2767
a109;2660
a10;2721
a110;2665
a111;2666
a112;2663
a117;2709
a118;2708
a119;2707
a11;261B
a120;2460
a121;2461
a122;2462
a123;2463
a124;2464
a125;2465
a126;2466
a127;2467
a128;2468
a129;2469
a12;261E
a130;2776
a131;2777
a132;2778
a133;2779
a134;277A
a135;277B
a136;277C
a137;277D
a138;277E
a139;277F
a13;270C
a140;2780
a141;2781
a142;2782
a143;2783
a144;2784
a145;2785
a146;2786
a147;2787
a148;2788
a149;2789
a14;270D
a150;278A
a151;278B
a152;278C
a153;278D
a154;278E
a155;278F
a156;2790
a157;2791
a158;2792
a159;2793
a15;270E
a160;2794
a161;2192
a162;27A3
a163;2194
a164;2195
a165;2799
a166;279B
a167;279C
a168;279D
a169;279E
a16;270F
a170;279F
a171;27A0
a172;27A1
a173;27A2
a174;27A4
a175;27A5
a176;27A6
a177;27A7
a178;27A8
a179;27A9
a17;2711
a180;27AB
a181;27AD
a182;27AF
a183;27B2
a184;27B3
a185;27B5
a186;27B8
a187;27BA
a188;27BB
a189;27BC
a18;2712
a190;27BD
a191;27BE
a192;279A
a193;27AA
a194;27B6
a195;27B9
a196;2798
a197;27B4
a198;27B7
a199;27AC
a19;2713
a1;2701
a200;27AE
a201;27B1
a202;2703
a203;2750
a204;2752
a205;276E
a206;2770
a20;2714
a21;2715
a22;2716
a23;2717
a24;2718
a25;2719
a26;271A
a27;271B
a28;271C
a29;2722
a2;2702
a30;2723
a31;2724
a32;2725
a33;2726
a34;2727
a35;2605
a36;2729
a37;272A
a38;272B
a39;272C
a3;2704
a40;272D
a41;272E
a42;272F
a43;2730
a44;2731
a45;2732
a46;2733
a47;2734
a48;2735
a49;2736
a4;260E
a50;2737
a51;2738
a52;2739
a53;273A
a54;273B
a55;273C
a56;273D
a57;273E
a58;273F
a59;2740
a5;2706
a60;2741
a61;2742
a62;2743
a63;2744
a64;2745
a65;2746
a66;2747
a67;2748
a68;2749
a69;274A
a6;271D
a70;274B
a71;25CF
a72;274D
a73;25A0
a74;274F
a75;2751
a76;25B2
a77;25BC
a78;25C6
a79;2756
a7;271E
a81;25D7
a82;2758
a83;2759
a84;275A
a85;276F
a86;2771
a87;2772
a88;2773
a89;2768
a8;271F
a90;2769
a91;276C
a92;276D
a93;276A
a94;276B
a95;2774
a96;2775
a97;275B
a98;275C
a99;275D
a9;2720
"""
# string table management
#
class StringTable:
def __init__( self, name_list, master_table_name ):
self.names = name_list
self.master_table = master_table_name
self.indices = {}
index = 0
for name in name_list:
self.indices[name] = index
index += len( name ) + 1
self.total = index
def dump( self, file ):
write = file.write
write( " static const char " + self.master_table +
"[" + repr( self.total ) + "] =\n" )
write( " {\n" )
line = ""
for name in self.names:
line += " '"
line += string.join( ( re.findall( ".", name ) ), "','" )
line += "', 0,\n"
write( line + " };\n\n\n" )
def dump_sublist( self, file, table_name, macro_name, sublist ):
write = file.write
write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" )
write( " /* Values are offsets into the `" +
self.master_table + "' table */\n\n" )
write( " static const short " + table_name +
"[" + macro_name + "] =\n" )
write( " {\n" )
line = " "
comma = ""
col = 0
for name in sublist:
line += comma
line += "%4d" % self.indices[name]
col += 1
comma = ","
if col == 14:
col = 0
comma = ",\n "
write( line + "\n };\n\n\n" )
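# The sketch below is illustrative only and is never called by the
# generator; the table names and the use of sys.stdout are assumptions made
# for demonstration.  It shows how a StringTable is dumped and how a
# sublist is expressed as offsets into the master name table.
def demo_string_table():
  """dump a tiny two-name master table and a one-name sublist to stdout"""
  st = StringTable( [ "A", "B" ], "demo_glyph_names" )
  st.dump( sys.stdout )                               # emits 'A', 0, 'B', 0
  st.dump_sublist( sys.stdout, "demo_sub_names",
                   "DEMO_NUM_SUB_NAMES", [ "B" ] )    # emits offset 2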
# We now store the Adobe Glyph List in compressed form. The list is put
# into a data structure called `trie' (because it has a tree-like
# appearance). Consider, for example, that you want to store the
# following name mapping:
#
# A => 1
# Aacute => 6
# Abalon => 2
# Abstract => 4
#
# It is possible to store the entries as follows.
#
# A => 1
# |
# +-acute => 6
# |
# +-b
# |
# +-alon => 2
# |
# +-stract => 4
#
# We see that each node in the trie has:
#
# - one or more `letters'
# - an optional value
# - zero or more child nodes
#
# The first step is to call
#
# root = StringNode( "", 0 )
# for word in map.keys():
# root.add( word, map[word] )
#
# which creates a large trie where each node has at most one child.
#
# Executing
#
# root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letter, according to the following scheme. We
# use the fact that in the AGL no name contains character codes > 127.
#
# name bitsize description
# ----------------------------------------------------------------
# notlast 1 Set to 1 if this is not the last letter
# in the word.
# ascii 7 The letter's ASCII value.
#
# - The letter is followed by a children count and the value of the
# current key (if any). Again we can do some optimization because all
# AGL entries are from the BMP; this means that 16 bits are sufficient
# to store its Unicode values. Additionally, no node has more than
# 127 children.
#
# name bitsize description
# -----------------------------------------
# hasvalue 1 Set to 1 if a 16-bit Unicode value follows.
# num_children 7 Number of children. Can be 0 only if
# `hasvalue' is set to 1.
# value 16 Optional Unicode value.
#
# - A node is finished by a list of 16bit absolute offsets to the
# children, which must be sorted in increasing order of their first
# letter.
#
# For simplicity, all 16bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
#
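#
# As a concrete (illustrative) example, storing just the two entries
#
#   A => 1
#   B => 2
#
# yields a 14-byte table:
#
#   00 02         root node: letter 0, no value, two children
#   00 06 00 0A   16-bit absolute offsets of the children `A' and `B'
#   41 80 00 01   node `A': last letter, `hasvalue' set, value 0x0001
#   42 80 00 02   node `B': last letter, `hasvalue' set, value 0x0002
#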
class StringNode:
def __init__( self, letter, value ):
self.letter = letter
self.value = value
self.children = {}
def __cmp__( self, other ):
return ord( self.letter[0] ) - ord( other.letter[0] )
def add( self, word, value ):
if len( word ) == 0:
self.value = value
return
letter = word[0]
word = word[1:]
if self.children.has_key( letter ):
child = self.children[letter]
else:
child = StringNode( letter, 0 )
self.children[letter] = child
child.add( word, value )
def optimize( self ):
# optimize all children first
children = self.children.values()
self.children = {}
for child in children:
self.children[child.letter[0]] = child.optimize()
# don't optimize if there's a value,
# if we don't have any child or if we
# have more than one child
if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
return self
child = children[0]
self.letter += child.letter
self.value = child.value
self.children = child.children
return self
def dump_debug( self, write, margin ):
# this is used during debugging
line = margin + "+-"
if len( self.letter ) == 0:
line += "<NOLETTER>"
else:
line += self.letter
if self.value:
line += " => " + repr( self.value )
write( line + "\n" )
if self.children:
margin += "| "
for child in self.children.values():
child.dump_debug( write, margin )
def locate( self, index ):
self.index = index
if len( self.letter ) > 0:
index += len( self.letter ) + 1
else:
index += 2
if self.value != 0:
index += 2
children = self.children.values()
children.sort()
index += 2 * len( children )
for child in children:
index = child.locate( index )
return index
def store( self, storage ):
# write the letters
l = len( self.letter )
if l == 0:
storage += struct.pack( "B", 0 )
else:
for n in range( l ):
val = ord( self.letter[n] )
if n < l - 1:
val += 128
storage += struct.pack( "B", val )
# write the count
children = self.children.values()
children.sort()
count = len( children )
if self.value != 0:
storage += struct.pack( "!BH", count + 128, self.value )
else:
storage += struct.pack( "B", count )
for child in children:
storage += struct.pack( "!H", child.index )
for child in children:
storage = child.store( storage )
return storage
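# The helper below rebuilds the two-entry example from the format
# description above and prints its compressed form; it is a demonstration
# only and is not called by the generator (the name `demo_trie_encoding'
# is ours).
def demo_trie_encoding():
  """build a tiny trie and dump its packed representation as hex"""
  root = StringNode( "", 0 )
  root.add( "A", 1 )
  root.add( "B", 2 )
  root  = root.optimize()
  total = root.locate( 0 )           # must be called before store()
  data  = root.store( "" )
  print "%d bytes: %s" % ( total,
                           string.join( [ "%02X" % ord( ch )
                                          for ch in data ], " " ) )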
def adobe_glyph_values():
"""return the list of glyph names and their unicode values"""
lines = string.split( adobe_glyph_list, '\n' )
glyphs = []
values = []
for line in lines:
if line:
fields = string.split( line, ';' )
# print fields[1] + ' - ' + fields[0]
subfields = string.split( fields[1], ' ' )
if len( subfields ) == 1:
glyphs.append( fields[0] )
values.append( fields[1] )
return glyphs, values
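# For instance (illustrative values), after
#
#   glyphs, values = adobe_glyph_values()
#
# one gets parallel lists with entries like glyphs[i] == "mu" and
# values[i] == "00B5".  Entries that expand to more than one code point
# (e.g. `noonhehinitialarabic') are skipped, since only single-value
# mappings can go into the compressed AGL table.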
def filter_glyph_names( alist, filter ):
"""filter `alist' by taking _out_ all glyph names that are in `filter'"""
count = 0
extras = []
for name in alist:
try:
filtered_index = filter.index( name )
except:
extras.append( name )
return extras
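# Note: list.index() raises ValueError when `name' is not in `filter', and
# it is exactly those names that end up in `extras'.  For instance
# (illustrative), filter_glyph_names( [ "a", "b", "c" ], [ "b" ] ) returns
# [ "a", "c" ].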
def dump_encoding( file, encoding_name, encoding_list ):
"""dump a given encoding"""
write = file.write
write( " /* the following are indices into the SID name table */\n" )
write( " static const unsigned short " + encoding_name +
"[" + repr( len( encoding_list ) ) + "] =\n" )
write( " {\n" )
line = " "
comma = ""
col = 0
for value in encoding_list:
line += comma
line += "%3d" % value
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
write( line + "\n };\n\n\n" )
def dump_array( the_array, write, array_name ):
"""dumps a given encoding"""
write( " static const unsigned char " + array_name +
"[" + repr( len( the_array ) ) + "L] =\n" )
write( " {\n" )
line = ""
comma = " "
col = 0
for value in the_array:
line += comma
line += "%3d" % ord( value )
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
if len( line ) > 1024:
write( line )
line = ""
write( line + "\n };\n\n\n" )
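# A quick (illustrative) example of the helper above: calling
#
#   dump_array( "AB", sys.stdout.write, "demo_bytes" )
#
# emits a C array of two unsigned chars holding the values 65 and 66.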
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
file = open( sys.argv[1], "w" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005, 2008, 2011 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write( "static const char* const the_names[] = {\n" )
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write( "static const unsigned long the_values[] = {\n" )
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write( """
#include <stdlib.h>
#include <stdio.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
write( "#endif /* TEST */\n" )
write("\n/* END */\n")
# Now run the main routine
#
main()
# END
| gpl-2.0 | -7,631,148,097,224,892,000 | 9,095,245,584,723,159,000 | 18.179697 | 92 | 0.792909 | false |