repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
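The schema above describes a flat table of source files. As a minimal sketch of how such a dump could be filtered (assuming the rows have been exported to a JSON Lines file named `code_corpus.jsonl`, which is a hypothetical name, one record per line with exactly these columns):

```python
import json

# Hypothetical export of the table below, one JSON object per line with the
# columns repo_name, path, copies, size, content, license.
with open("code_corpus.jsonl", encoding="utf-8") as fh:
    records = [json.loads(line) for line in fh]

# Keep permissively licensed Python files under 10 kB (size is stored as a string).
small_python = [
    r for r in records
    if r["path"].endswith(".py")
    and r["license"] in ("mit", "bsd-2-clause", "bsd-3-clause", "apache-2.0")
    and int(r["size"]) < 10_000
]
for r in small_python:
    print(r["repo_name"], r["path"], r["license"])
```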
Xion/pelican-plugins
|
sub_parts/sub_parts.py
|
59
|
2671
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pelican import signals
import logging
logger = logging.getLogger(__name__)
def patch_subparts(generator):
generator.subparts = []
slugs = {}
for article in generator.articles:
slugs[article.slug] = article
if '--' in article.slug:
generator.subparts.append(article)
for article in generator.subparts:
logger.info('sub_part: Detected %s', article.slug)
(pslug, _) = article.slug.rsplit('--', 1)
if pslug in slugs:
parent = slugs[pslug]
if not hasattr(parent, 'subparts'):
parent.subparts = []
parent.subparts.append(article)
article.subpart_of = parent
article.subtitle = article.title
article.title = article.title + ", " + parent.title
generator.dates.remove(article)
generator.articles.remove(article)
if article.category:
for cat, arts in generator.categories:
if cat.name == article.category.name:
arts.remove(article)
break
else:
logger.error(
'sub_part: Cannot remove sub-part from category %s',
article.category)
if (hasattr(article, 'subphotos') or
hasattr(article, 'photo_gallery')):
parent.subphotos = (
getattr(parent, 'subphotos',
len(getattr(parent, 'photo_gallery', []))) +
getattr(article, 'subphotos', 0) +
len(getattr(article, 'photo_gallery', [])))
else:
logger.error('sub_part: No parent for %s', pslug)
generator._update_context(('articles', 'dates', 'subparts'))
def write_subparts(generator, writer):
for article in generator.subparts:
signals.article_generator_write_article.send(generator,
content=article)
writer.write_file(
article.save_as, generator.get_template(article.template),
generator.context, article=article, category=article.category,
override_output=hasattr(article, 'override_save_as'),
relative_urls=generator.settings['RELATIVE_URLS'])
if len(generator.subparts) > 0:
print('sub_part: processed {} sub-parts.'.format(
len(generator.subparts)))
def register():
signals.article_generator_finalized.connect(patch_subparts)
signals.article_writer_finalized.connect(write_subparts)
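The plugin above keys everything off a `--` separator in the article slug. A tiny standalone sketch (not using Pelican, slugs are invented) of how `rsplit('--', 1)` derives the parent slug, including for nested sub-parts:

```python
# Illustration of the slug convention used by sub_parts.py above:
# a sub-part's slug is "<parent-slug>--<suffix>", and rsplit('--', 1)
# yields the parent slug. Nested parts chain the separator.
slugs = ["guide", "guide--part-2", "guide--part-2--appendix"]

for slug in slugs:
    if "--" in slug:
        parent_slug, _ = slug.rsplit("--", 1)
        print(f"{slug!r} is a sub-part of {parent_slug!r}")
    else:
        print(f"{slug!r} is a top-level article")
```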
|
agpl-3.0
|
meghabhoj/NEWBAZAAR
|
bazaar/listings/urls.py
|
2
|
1143
|
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from .views import ListingListView, ListingDetailView, ListingUpdateView, ListingDeleteView, PublishingListView, \
PublishingCreateView, PublishingUpdateView, PublishingDeleteView
urlpatterns = patterns(
'',
url(r'^listings/$', ListingListView.as_view(), name="listing-list"),
url(r'^listings/new/$', ListingUpdateView.as_view(), name="listing-create"),
url(r'^listings/(?P<pk>\d+)/$', ListingDetailView.as_view(), name='listing-detail'),
url(r'^listings/update/(?P<pk>\d+)/$', ListingUpdateView.as_view(), name='listing-update'),
url(r'^listings/delete/(?P<pk>\d+)/$', ListingDeleteView.as_view(), name='listing-delete'),
# Publishing view
url(r'^publishings/$', PublishingListView.as_view(), name="publishing-list"),
url(r'^publishings/new/$', PublishingCreateView.as_view(), name="publishing-create"),
url(r'^publishings/update/(?P<pk>\d+)/$', PublishingUpdateView.as_view(), name='publishing-update'),
url(r'^publishings/delete/(?P<pk>\d+)/$', PublishingDeleteView.as_view(), name='publishing-delete'),
)
|
bsd-2-clause
|
mozilla/kitsune
|
kitsune/kpi/management/commands/update_l10n_metric.py
|
1
|
2630
|
from datetime import date, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from kitsune.kpi.management import utils
from kitsune.kpi.models import L10N_METRIC_CODE, Metric, MetricKind
from kitsune.sumo import googleanalytics
class Command(BaseCommand):
help = "Calculate new l10n coverage numbers and save."
def handle(self, **options):
"""
L10n coverage is a measure of the amount of translations that are
up to date, weighted by the number of visits for each locale.
The "algorithm" (see Bug 727084):
SUMO visits = Total SUMO visits for the last 30 days;
Total translated = 0;
For each locale {
Total up to date = Total up to date +
((Number of up to date articles in the en-US top 50 visited)/50 ) *
(Visitors for that locale / SUMO visits));
}
An up to date article is any of the following:
* An en-US article (by definition it is always up to date)
* The latest en-US revision has been translated
* There are only new revisions with TYPO_SIGNIFICANCE not translated
* There is only one revision of MEDIUM_SIGNIFICANCE not translated
"""
# Get the top 60 visited articles. We will only use the top 50
# but a handful aren't localizable so we get some extras.
top_60_docs = utils._get_top_docs(60)
# Get the visits to each locale in the last 30 days.
end = date.today() - timedelta(days=1) # yesterday
start = end - timedelta(days=30)
locale_visits = googleanalytics.visitors_by_locale(start, end)
# Total visits.
total_visits = sum(locale_visits.values())
# Calculate the coverage.
coverage = 0
for locale, visits in locale_visits.items():
if locale == settings.WIKI_DEFAULT_LANGUAGE:
num_docs = utils.MAX_DOCS_UP_TO_DATE
up_to_date_docs = utils.MAX_DOCS_UP_TO_DATE
else:
up_to_date_docs, num_docs = utils._get_up_to_date_count(top_60_docs, locale)
if num_docs and total_visits:
coverage += (float(up_to_date_docs) / num_docs) * (float(visits) / total_visits)
# Save the value to Metric table.
metric_kind = MetricKind.objects.get_or_create(code=L10N_METRIC_CODE)[0]
day = date.today()
Metric.objects.create(
kind=metric_kind,
start=day,
end=day + timedelta(days=1),
value=int(coverage * 100),
) # Store as a % int.
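As a worked example of the weighting described in the docstring above, a standalone sketch with invented visit counts and up-to-date article counts (not real Google Analytics data):

```python
# Standalone sketch of the coverage formula from the docstring above.
locale_visits = {"en-US": 700_000, "de": 200_000, "es": 100_000}
up_to_date = {"en-US": (50, 50), "de": (40, 50), "es": (25, 50)}  # (up to date, total)

total_visits = sum(locale_visits.values())
coverage = 0.0
for locale, visits in locale_visits.items():
    up, total = up_to_date[locale]
    coverage += (up / total) * (visits / total_visits)

print(int(coverage * 100))  # 91 -- stored as an integer percentage, as in the command
```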
|
bsd-3-clause
|
bopo/tablib
|
tablib/packages/xlwt3/ExcelFormulaLexer.py
|
46
|
4311
|
import sys
from .antlr import EOF, CommonToken as Tok, TokenStream, TokenStreamException
import struct
from . import ExcelFormulaParser
from re import compile as recompile, match, LOCALE, UNICODE, IGNORECASE, VERBOSE
int_const_pattern = r"\d+\b"
flt_const_pattern = r"""
(?:
(?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
|
(?: \d+ \. ) # 1. 12. 123. etc
)
# followed by optional exponent part
(?: [Ee] [+-]? \d+ ) ?
"""
str_const_pattern = r'"(?:[^"]|"")*"'
#range2d_pattern = recompile(r"\$?[A-I]?[A-Z]\$?\d+:\$?[A-I]?[A-Z]\$?\d+"
ref2d_r1c1_pattern = r"[Rr]0*[1-9][0-9]*[Cc]0*[1-9][0-9]*"
ref2d_pattern = r"\$?[A-I]?[A-Z]\$?0*[1-9][0-9]*"
true_pattern = r"TRUE\b"
false_pattern = r"FALSE\b"
if_pattern = r"IF\b"
choose_pattern = r"CHOOSE\b"
name_pattern = r"\w[\.\w]*"
quotename_pattern = r"'(?:[^']|'')*'" #### It's essential that this bracket be non-grouping.
ne_pattern = r"<>"
ge_pattern = r">="
le_pattern = r"<="
pattern_type_tuples = (
(flt_const_pattern, ExcelFormulaParser.NUM_CONST),
(int_const_pattern, ExcelFormulaParser.INT_CONST),
(str_const_pattern, ExcelFormulaParser.STR_CONST),
# (range2d_pattern , ExcelFormulaParser.RANGE2D),
(ref2d_r1c1_pattern, ExcelFormulaParser.REF2D_R1C1),
(ref2d_pattern , ExcelFormulaParser.REF2D),
(true_pattern , ExcelFormulaParser.TRUE_CONST),
(false_pattern , ExcelFormulaParser.FALSE_CONST),
(if_pattern , ExcelFormulaParser.FUNC_IF),
(choose_pattern , ExcelFormulaParser.FUNC_CHOOSE),
(name_pattern , ExcelFormulaParser.NAME),
(quotename_pattern, ExcelFormulaParser.QUOTENAME),
(ne_pattern, ExcelFormulaParser.NE),
(ge_pattern, ExcelFormulaParser.GE),
(le_pattern, ExcelFormulaParser.LE),
)
_re = recompile(
'(' + ')|('.join([i[0] for i in pattern_type_tuples]) + ')',
VERBOSE+LOCALE+IGNORECASE)
_toktype = [None] + [i[1] for i in pattern_type_tuples]
# need dummy at start because re.MatchObject.lastindex counts from 1
single_char_lookup = {
'=': ExcelFormulaParser.EQ,
'<': ExcelFormulaParser.LT,
'>': ExcelFormulaParser.GT,
'+': ExcelFormulaParser.ADD,
'-': ExcelFormulaParser.SUB,
'*': ExcelFormulaParser.MUL,
'/': ExcelFormulaParser.DIV,
':': ExcelFormulaParser.COLON,
';': ExcelFormulaParser.SEMICOLON,
',': ExcelFormulaParser.COMMA,
'(': ExcelFormulaParser.LP,
')': ExcelFormulaParser.RP,
'&': ExcelFormulaParser.CONCAT,
'%': ExcelFormulaParser.PERCENT,
'^': ExcelFormulaParser.POWER,
'!': ExcelFormulaParser.BANG,
}
class Lexer(TokenStream):
def __init__(self, text):
self._text = text[:]
self._pos = 0
self._line = 0
def isEOF(self):
return len(self._text) <= self._pos
def curr_ch(self):
return self._text[self._pos]
def next_ch(self, n = 1):
self._pos += n
def is_whitespace(self):
return self.curr_ch() in " \t\n\r\f\v"
def match_pattern(self):
m = _re.match(self._text, self._pos)
if not m:
return None
self._pos = m.end(0)
return Tok(type = _toktype[m.lastindex], text = m.group(0), col = m.start(0) + 1)
def nextToken(self):
# skip whitespace
while not self.isEOF() and self.is_whitespace():
self.next_ch()
if self.isEOF():
return Tok(type = EOF)
# first, try to match token with 2 or more chars
t = self.match_pattern()
if t:
return t
# second, we want 1-char tokens
te = self.curr_ch()
try:
ty = single_char_lookup[te]
except KeyError:
raise TokenStreamException(
"Unexpected char %r in column %u." % (self.curr_ch(), self._pos))
self.next_ch()
return Tok(type=ty, text=te, col=self._pos)
if __name__ == '__main__':
try:
for t in Lexer(""" 1.23 456 "abcd" R2C2 a1 iv65536 true false if choose a_name 'qname' <> >= <= """):
print(t)
except TokenStreamException as e:
print("error:", e)
|
mit
|
rosmo/aurora
|
src/test/python/apache/aurora/client/test_base.py
|
3
|
3152
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from apache.aurora.client import base
from gen.apache.aurora.api.ttypes import (
PopulateJobResult,
Response,
ResponseCode,
ResponseDetail,
Result,
TaskConfig
)
class TestBase(unittest.TestCase):
def test_format_response_with_message(self):
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
formatted = base.format_response(resp)
assert formatted == 'Response from scheduler: ERROR (message: Error)'
def test_format_response_with_details(self):
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
formatted = base.format_response(resp)
assert formatted == 'Response from scheduler: ERROR (message: Error)'
def test_combine_messages(self):
resp = Response(responseCode=ResponseCode.ERROR)
assert base.combine_messages(resp) == ''
resp = Response(responseCode=ResponseCode.ERROR, details=[])
assert base.combine_messages(resp) == ''
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
assert base.combine_messages(resp) == 'Error'
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail()])
assert base.combine_messages(resp) == 'Unknown error'
resp = Response(
responseCode=ResponseCode.ERROR,
details=[ResponseDetail(message='Error1'), ResponseDetail(message='Error2')])
assert base.combine_messages(resp) == 'Error1, Error2'
def test_get_populated_task_config_set(self):
config = TaskConfig()
resp = Response(responseCode=ResponseCode.OK, result=Result(populateJobResult=PopulateJobResult(
taskConfig=config)))
assert config == resp.result.populateJobResult.taskConfig
def test_synthesize_url(self):
base_url = 'http://example.com'
role = 'some-role'
environment = 'some-environment'
job = 'some-job'
update_id = 'some-update-id'
assert (('%s/scheduler/%s/%s/%s/update/%s' % (base_url, role, environment, job, update_id)) ==
base.synthesize_url(base_url, role, environment, job, update_id=update_id))
assert (('%s/scheduler/%s/%s/%s' % (base_url, role, environment, job)) ==
base.synthesize_url(base_url, role, environment, job))
assert (('%s/scheduler/%s/%s' % (base_url, role, environment)) ==
base.synthesize_url(base_url, role, environment))
assert (('%s/scheduler/%s' % (base_url, role)) ==
base.synthesize_url(base_url, role))
assert (('%s/scheduler/%s' % (base_url, role)) ==
base.synthesize_url(base_url, role))
|
apache-2.0
|
jazzmes/pyroute2
|
tests/test_tc.py
|
2
|
7186
|
import socket
from utils import require_user
from pyroute2 import IPRoute
from pyroute2 import protocols
from pyroute2.netlink import NetlinkError
from pyroute2.iproute import RTM_NEWQDISC
from pyroute2.iproute import RTM_NEWTFILTER
from pyroute2.iproute import RTM_NEWTCLASS
from pyroute2.iproute import TC_H_INGRESS
from nose.plugins.skip import SkipTest
def try_qd(qd, call, *argv, **kwarg):
try:
call(*argv, **kwarg)
except NetlinkError as e:
# code 2: 'No such file or directory'
if e.code == 2:
raise SkipTest('missing traffic control <%s>' % (qd))
raise
class BasicTest(object):
def setup(self):
require_user('root')
self.ip = IPRoute()
self.ip.link('add',
index=0,
ifname='dummyX',
linkinfo={'attrs': [['IFLA_INFO_KIND', 'dummy']]})
self.interface = self.ip.link_lookup(ifname='dummyX')[0]
def teardown(self):
self.ip.link('delete', index=self.interface)
self.ip.close()
def get_qdiscs(self):
return [x for x in self.ip.get_qdiscs() if
x['index'] == self.interface]
def get_qdisc(self):
# get qdiscs list and filter out our interface
qds = self.get_qdiscs()
if qds:
return qds[0]
else:
return None
class TestIngress(BasicTest):
def test_simple(self):
self.ip.tc(RTM_NEWQDISC, 'ingress', self.interface, 0xffff0000)
qds = self.get_qdisc()
# assert the list is not empty
assert qds
# assert there is the ingress queue
assert qds.get_attr('TCA_KIND') == 'ingress'
# assert it has proper handle and parent
assert qds['handle'] == 0xffff0000
assert qds['parent'] == TC_H_INGRESS
def test_filter(self):
self.test_simple()
self.ip.tc(RTM_NEWTFILTER, 'u32', self.interface, 0,
protocol=socket.AF_INET,
parent=0xffff0000,
action='drop',
target=0x1,
rate='10kbit',
burst=10240,
limit=0,
prio=50,
keys=['0x0/0x0+12'])
fls = self.ip.get_filters(index=self.interface, parent=0xffff0000)
# assert there are filters
assert fls
# assert there is one police rule:
prs = [x for x in fls
if x.get_attr('TCA_OPTIONS') is not None and
(x.get_attr('TCA_OPTIONS').get_attr('TCA_U32_POLICE')
is not None or
x.get_attr('TCA_OPTIONS').get_attr('TCA_U32_ACT')
is not None)][0]
# assert the police rule has specified parameters
options = prs.get_attr('TCA_OPTIONS')
police_u32 = options.get_attr('TCA_U32_POLICE')
# on modern kernels there is no TCA_U32_POLICE under
# TCA_OPTIONS, but there is TCA_U32_ACT
if police_u32 is None:
police_u32 = options.get_attr('TCA_U32_ACT').\
get_attr('TCA_ACT_PRIO_0').\
get_attr('TCA_ACT_OPTIONS')
police_tbf = police_u32.get_attr('TCA_POLICE_TBF')
assert police_tbf['rate'] == 1250
assert police_tbf['mtu'] == 2040
class TestPfifo(BasicTest):
def test_pfifo(self):
try_qd('pfifo_fast', self.ip.tc,
RTM_NEWQDISC, 'pfifo_fast', self.interface, 0)
qds = self.get_qdisc()
assert qds
assert qds.get_attr('TCA_KIND') == 'pfifo_fast'
assert isinstance(qds.get_attr('TCA_OPTIONS')['priomap'], tuple)
class TestSfq(BasicTest):
def test_sfq(self):
try_qd('sfq', self.ip.tc,
RTM_NEWQDISC, 'sfq', self.interface, 0, perturb=10)
qds = self.get_qdisc()
assert qds
assert qds.get_attr('TCA_KIND') == 'sfq'
assert qds.get_attr('TCA_OPTIONS')['perturb_period'] == 10
class TestTbf(BasicTest):
def test_tbf(self):
try_qd('tbf', self.ip.tc,
RTM_NEWQDISC, 'tbf', self.interface, 0,
rate='220kbit',
latency='50ms',
burst=1540)
qds = self.get_qdisc()
assert qds
assert qds.get_attr('TCA_KIND') == 'tbf'
parms = qds.get_attr('TCA_OPTIONS').get_attr('TCA_TBF_PARMS')
assert parms
assert parms['rate'] == 27500
class TestHtb(BasicTest):
def test_htb(self):
# 8<-----------------------------------------------------
# root queue, '1:0' handle notation
try_qd('htb', self.ip.tc,
RTM_NEWQDISC, 'htb', self.interface, '1:',
default='20:0')
qds = self.get_qdiscs()
assert len(qds) == 1
assert qds[0].get_attr('TCA_KIND') == 'htb'
# 8<-----------------------------------------------------
# classes, both string and int handle notation
try_qd('htb', self.ip.tc,
RTM_NEWTCLASS, 'htb', self.interface, '1:1',
parent='1:0',
rate='256kbit',
burst=1024 * 6)
try_qd('htb', self.ip.tc,
RTM_NEWTCLASS, 'htb', self.interface, 0x10010,
parent=0x10001,
rate='192kbit',
burst=1024 * 6,
prio=1)
try_qd('htb', self.ip.tc,
RTM_NEWTCLASS, 'htb', self.interface, '1:20',
parent='1:1',
rate='128kbit',
burst=1024 * 6,
prio=2)
cls = self.ip.get_classes(index=self.interface)
assert len(cls) == 3
# 8<-----------------------------------------------------
# leaves, both string and int handle notation
try_qd('sfq', self.ip.tc,
RTM_NEWQDISC, 'sfq', self.interface, '10:',
parent='1:10',
perturb=10)
try_qd('sfq', self.ip.tc,
RTM_NEWQDISC, 'sfq', self.interface, 0x200000,
parent=0x10020,
perturb=10)
qds = self.get_qdiscs()
types = set([x.get_attr('TCA_KIND') for x in qds])
assert types == set(('htb', 'sfq'))
# 8<-----------------------------------------------------
# filters, both string and int handle notation
#
# Please note, that u32 filter requires ethernet protocol
# numbers, as defined in protocols module. Do not provide
# here socket.AF_INET and so on.
#
try_qd('u32', self.ip.tc,
RTM_NEWTFILTER, 'u32', self.interface, '0:0',
parent='1:0',
prio=10,
protocol=protocols.ETH_P_IP,
target='1:10',
keys=['0x0006/0x00ff+8', '0x0000/0xffc0+2'])
try_qd('u32', self.ip.tc,
RTM_NEWTFILTER, 'u32', self.interface, 0,
parent=0x10000,
prio=10,
protocol=protocols.ETH_P_IP,
target=0x10020,
keys=['0x5/0xf+0', '0x10/0xff+33'])
# 2 filters + 2 autogenerated
fls = self.ip.get_filters(index=self.interface)
assert len(fls) == 4
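The asserted police/TBF rate values follow from tc's unit handling: rates given as 'Nkbit' (kilobits per second) are reported in bytes per second. A short check of the two values asserted above:

```python
# N kbit/s = N * 1000 bits/s, divided by 8 bits per byte.
def kbit_to_bytes_per_sec(kbit):
    return kbit * 1000 // 8

assert kbit_to_bytes_per_sec(10) == 1250    # '10kbit' u32 police rate
assert kbit_to_bytes_per_sec(220) == 27500  # '220kbit' tbf rate
print("both asserted rates check out")
```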
|
gpl-2.0
|
raysguy/HTPC-Manager
|
libs/requests/packages/chardet/escprober.py
|
2936
|
3187
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
|
mit
|
tudorvio/tempest
|
tempest/services/compute/json/extensions_client.py
|
9
|
1337
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.api_schema.response.compute.v2_1 import extensions as schema
from tempest.common import service_client
class ExtensionsClient(service_client.ServiceClient):
def list_extensions(self):
url = 'extensions'
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_extensions, resp, body)
return service_client.ResponseBodyList(resp, body['extensions'])
def show_extension(self, extension_alias):
resp, body = self.get('extensions/%s' % extension_alias)
body = json.loads(body)
return service_client.ResponseBody(resp, body['extension'])
|
apache-2.0
|
stevenewey/django
|
django/views/decorators/http.py
|
144
|
7467
|
"""
Decorators for views based on HTTP headers.
"""
import logging
from calendar import timegm
from functools import wraps
from django.http import (
HttpResponse, HttpResponseNotAllowed, HttpResponseNotModified,
)
from django.middleware.http import ConditionalGetMiddleware
from django.utils.decorators import available_attrs, decorator_from_middleware
from django.utils.http import (
http_date, parse_etags, parse_http_date_safe, quote_etag,
)
conditional_page = decorator_from_middleware(ConditionalGetMiddleware)
logger = logging.getLogger('django.request')
def require_http_methods(request_method_list):
"""
Decorator to make a view only accept particular request methods. Usage::
@require_http_methods(["GET", "POST"])
def my_view(request):
# I can assume now that only GET or POST requests make it this far
# ...
Note that request methods should be in uppercase.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
if request.method not in request_method_list:
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return HttpResponseNotAllowed(request_method_list)
return func(request, *args, **kwargs)
return inner
return decorator
require_GET = require_http_methods(["GET"])
require_GET.__doc__ = "Decorator to require that a view only accepts the GET method."
require_POST = require_http_methods(["POST"])
require_POST.__doc__ = "Decorator to require that a view only accepts the POST method."
require_safe = require_http_methods(["GET", "HEAD"])
require_safe.__doc__ = "Decorator to require that a view only accepts safe methods: GET and HEAD."
def _precondition_failed(request):
logger.warning('Precondition Failed: %s', request.path,
extra={
'status_code': 412,
'request': request
},
)
return HttpResponse(status=412)
def condition(etag_func=None, last_modified_func=None):
"""
Decorator to support conditional retrieval (or change) for a view
function.
The parameters are callables to compute the ETag and last modified time for
the requested resource, respectively. The callables are passed the same
parameters as the view itself. The Etag function should return a string (or
None if the resource doesn't exist), whilst the last_modified function
should return a datetime object (or None if the resource doesn't exist).
If both parameters are provided, all the preconditions must be met before
the view is processed.
This decorator will either pass control to the wrapped view function or
return an HTTP 304 response (unmodified) or 412 response (preconditions
failed), depending upon the request method.
Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match
plus If-modified-since headers) will result in the view function being
called.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
# Get HTTP request headers
if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
if if_modified_since:
if_modified_since = parse_http_date_safe(if_modified_since)
if_unmodified_since = request.META.get("HTTP_IF_UNMODIFIED_SINCE")
if if_unmodified_since:
if_unmodified_since = parse_http_date_safe(if_unmodified_since)
if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
if_match = request.META.get("HTTP_IF_MATCH")
etags = []
if if_none_match or if_match:
# There can be more than one ETag in the request, so we
# consider the list of values.
try:
etags = parse_etags(if_none_match or if_match)
except ValueError:
# In case of invalid etag ignore all ETag headers.
# Apparently Opera sends invalidly quoted headers at times
# (we should be returning a 400 response, but that's a
# little extreme) -- this is Django bug #10681.
if_none_match = None
if_match = None
# Compute values (if any) for the requested resource.
def get_last_modified():
if last_modified_func:
dt = last_modified_func(request, *args, **kwargs)
if dt:
return timegm(dt.utctimetuple())
res_etag = etag_func(request, *args, **kwargs) if etag_func else None
res_last_modified = get_last_modified()
response = None
if not ((if_match and if_modified_since) or
(if_none_match and if_unmodified_since) or
(if_modified_since and if_unmodified_since) or
(if_match and if_none_match)):
# We only get here if no undefined combinations of headers are
# specified.
if ((if_none_match and (res_etag in etags or
"*" in etags and res_etag)) and
(not if_modified_since or
(res_last_modified and if_modified_since and
res_last_modified <= if_modified_since))):
if request.method in ("GET", "HEAD"):
response = HttpResponseNotModified()
else:
response = _precondition_failed(request)
elif (if_match and ((not res_etag and "*" in etags) or
(res_etag and res_etag not in etags) or
(res_last_modified and if_unmodified_since and
res_last_modified > if_unmodified_since))):
response = _precondition_failed(request)
elif (not if_none_match and request.method in ("GET", "HEAD") and
res_last_modified and if_modified_since and
res_last_modified <= if_modified_since):
response = HttpResponseNotModified()
elif (not if_match and
res_last_modified and if_unmodified_since and
res_last_modified > if_unmodified_since):
response = _precondition_failed(request)
if response is None:
response = func(request, *args, **kwargs)
# Set relevant headers on the response if they don't already exist.
if res_last_modified and not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date(res_last_modified)
if res_etag and not response.has_header('ETag'):
response['ETag'] = quote_etag(res_etag)
return response
return inner
return decorator
# Shortcut decorators for common cases based on ETag or Last-Modified only
def etag(etag_func):
return condition(etag_func=etag_func)
def last_modified(last_modified_func):
return condition(last_modified_func=last_modified_func)
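A minimal usage sketch of the condition decorator defined above. The `Article` model, its `updated` field, and the view are hypothetical, used only to illustrate the callable contract described in the docstring:

```python
# Hypothetical usage of django.views.decorators.http.condition.
from django.http import HttpResponse
from django.views.decorators.http import condition

from myapp.models import Article  # hypothetical model with an `updated` DateTimeField

def latest_entry(request, pk):
    try:
        return Article.objects.get(pk=pk).updated
    except Article.DoesNotExist:
        return None  # resource doesn't exist, as the docstring describes

@condition(last_modified_func=latest_entry)
def article_detail(request, pk):
    article = Article.objects.get(pk=pk)
    return HttpResponse(article.body)
```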
|
bsd-3-clause
|
srottem/indy-sdk
|
docs/how-tos/write-did-and-query-verkey/python/write_did_and_query_verkey.py
|
2
|
6316
|
"""
Example demonstrating how to add a DID with the role of Trust Anchor as a Steward.
Uses a seed to obtain the Steward's DID, which already exists on the ledger.
Then it generates a new DID/Verkey pair for the Trust Anchor.
Using the Steward's DID, a NYM transaction request is built to add the Trust Anchor's DID and Verkey
to the ledger with the role of Trust Anchor.
Once the NYM is successfully written to the ledger, a new DID/Verkey pair is generated to represent
a client, which is used to build a GET_NYM request to query the ledger and confirm the Trust Anchor's Verkey.
For the sake of simplicity, a single wallet is used. In a real-world scenario, three different wallets
would be used and the DIDs would be exchanged over some channel of communication.
"""
import asyncio
import json
import pprint
from indy import pool, ledger, wallet, did
from indy.error import IndyError
pool_name = 'pool'
wallet_name = 'wallet'
genesis_file_path = '/home/vagrant/code/evernym/indy-sdk/cli/docker_pool_transactions_genesis'
wallet_credentials = json.dumps({"key": "wallet_key"})
def print_log(value_color="", value_noncolor=""):
"""set the colors for text."""
HEADER = '\033[92m'
ENDC = '\033[0m'
print(HEADER + value_color + ENDC + str(value_noncolor))
async def write_nym_and_query_verkey():
try:
# 1.
print_log('\n1. Creates a new local pool ledger configuration that is used '
'later when connecting to ledger.\n')
pool_config = json.dumps({'genesis_txn': genesis_file_path})
await pool.create_pool_ledger_config(config_name=pool_name, config=pool_config)
# 2.
print_log('\n2. Open pool ledger and get handle from libindy\n')
pool_handle = await pool.open_pool_ledger(config_name=pool_name, config=None)
# 3.
print_log('\n3. Creating new secure wallet\n')
await wallet.create_wallet(pool_name, wallet_name, None, None, wallet_credentials)
# 4.
print_log('\n4. Open wallet and get handle from libindy\n')
wallet_handle = await wallet.open_wallet(wallet_name, None, wallet_credentials)
# 5.
print_log('\n5. Generating and storing steward DID and verkey\n')
steward_seed = '000000000000000000000000Steward1'
did_json = json.dumps({'seed': steward_seed})
steward_did, steward_verkey = await did.create_and_store_my_did(wallet_handle, did_json)
print_log('Steward DID: ', steward_did)
print_log('Steward Verkey: ', steward_verkey)
# 6.
print_log('\n6. Generating and storing trust anchor DID and verkey\n')
trust_anchor_did, trust_anchor_verkey = await did.create_and_store_my_did(wallet_handle, "{}")
print_log('Trust anchor DID: ', trust_anchor_did)
print_log('Trust anchor Verkey: ', trust_anchor_verkey)
# 7.
print_log('\n7. Building NYM request to add Trust Anchor to the ledger\n')
nym_transaction_request = await ledger.build_nym_request(submitter_did=steward_did,
target_did=trust_anchor_did,
ver_key=trust_anchor_verkey,
alias=None,
role='TRUST_ANCHOR')
print_log('NYM transaction request: ')
pprint.pprint(json.loads(nym_transaction_request))
# 8.
print_log('\n8. Sending NYM request to the ledger\n')
nym_transaction_response = await ledger.sign_and_submit_request(pool_handle=pool_handle,
wallet_handle=wallet_handle,
submitter_did=steward_did,
request_json=nym_transaction_request)
print_log('NYM transaction response: ')
pprint.pprint(json.loads(nym_transaction_response))
# 9.
print_log('\n9. Generating and storing DID and verkey representing a Client '
'that wants to obtain Trust Anchor Verkey\n')
client_did, client_verkey = await did.create_and_store_my_did(wallet_handle, "{}")
print_log('Client DID: ', client_did)
print_log('Client Verkey: ', client_verkey)
# 10.
print_log('\n10. Building the GET_NYM request to query trust anchor verkey\n')
get_nym_request = await ledger.build_get_nym_request(submitter_did=client_did,
target_did=trust_anchor_did)
print_log('GET_NYM request: ')
pprint.pprint(json.loads(get_nym_request))
# 11.
print_log('\n11. Sending the Get NYM request to the ledger\n')
get_nym_response_json = await ledger.submit_request(pool_handle=pool_handle,
request_json=get_nym_request)
get_nym_response = json.loads(get_nym_response_json)
print_log('GET_NYM response: ')
pprint.pprint(get_nym_response)
# 12.
print_log('\n12. Comparing Trust Anchor verkey as written by Steward and as retrieved in GET_NYM '
'response submitted by Client\n')
print_log('Written by Steward: ', trust_anchor_verkey)
verkey_from_ledger = json.loads(get_nym_response['result']['data'])['verkey']
print_log('Queried from ledger: ', verkey_from_ledger)
print_log('Matching: ', verkey_from_ledger == trust_anchor_verkey)
# 13.
print_log('\n13. Closing wallet and pool\n')
await wallet.close_wallet(wallet_handle)
await pool.close_pool_ledger(pool_handle)
# 14.
print_log('\n14. Deleting created wallet\n')
await wallet.delete_wallet(wallet_name, wallet_credentials)
# 15.
print_log('\n15. Deleting pool ledger config\n')
await pool.delete_pool_ledger_config(pool_name)
except IndyError as e:
print('Error occurred: %s' % e)
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(write_nym_and_query_verkey())
loop.close()
if __name__ == '__main__':
main()
|
apache-2.0
|
lucashmorais/x-Bench
|
mozmill-env/python/Lib/site-packages/hgext/convert/bzr.py
|
94
|
11295
|
# bzr.py - bzr support for the convert extension
#
# Copyright 2008, 2009 Marek Kubica <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# This module is for handling 'bzr', which was formerly known as Bazaar-NG;
# it cannot access 'baz' repositories, but those were never used very much
import os
from mercurial import demandimport
# these do not work with demandimport, blacklist
demandimport.ignore.extend([
'bzrlib.transactions',
'bzrlib.urlutils',
'ElementPath',
])
from mercurial.i18n import _
from mercurial import util
from common import NoRepo, commit, converter_source
try:
# bazaar imports
from bzrlib import bzrdir, revision, errors
from bzrlib.revisionspec import RevisionSpec
except ImportError:
pass
supportedkinds = ('file', 'symlink')
class bzr_source(converter_source):
"""Reads Bazaar repositories by using the Bazaar Python libraries"""
def __init__(self, ui, path, rev=None):
super(bzr_source, self).__init__(ui, path, rev=rev)
if not os.path.exists(os.path.join(path, '.bzr')):
raise NoRepo(_('%s does not look like a Bazaar repository')
% path)
try:
# access bzrlib stuff
bzrdir
except NameError:
raise NoRepo(_('Bazaar modules could not be loaded'))
path = os.path.abspath(path)
self._checkrepotype(path)
try:
self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
except errors.NoRepositoryPresent:
raise NoRepo(_('%s does not look like a Bazaar repository')
% path)
self._parentids = {}
def _checkrepotype(self, path):
# Lightweight checkouts detection is informational but probably
# fragile at API level. It should not terminate the conversion.
try:
from bzrlib import bzrdir
dir = bzrdir.BzrDir.open_containing(path)[0]
try:
tree = dir.open_workingtree(recommend_upgrade=False)
branch = tree.branch
except (errors.NoWorkingTree, errors.NotLocalUrl):
tree = None
branch = dir.open_branch()
if (tree is not None and tree.bzrdir.root_transport.base !=
branch.bzrdir.root_transport.base):
self.ui.warn(_('warning: lightweight checkouts may cause '
'conversion failures, try with a regular '
'branch instead.\n'))
except Exception:
self.ui.note(_('bzr source type could not be determined\n'))
def before(self):
"""Before the conversion begins, acquire a read lock
for all the operations that might need it. Fortunately
read locks don't block other reads or writes to the
repository, so this shouldn't have any impact on the usage of
the source repository.
The alternative would be locking on every operation that
needs locks (there are currently two: getting the file and
getting the parent map) and releasing immediately after,
but this approach can take even 40% longer."""
self.sourcerepo.lock_read()
def after(self):
self.sourcerepo.unlock()
def _bzrbranches(self):
return self.sourcerepo.find_branches(using=True)
def getheads(self):
if not self.rev:
# Set using=True to avoid nested repositories (see issue3254)
heads = sorted([b.last_revision() for b in self._bzrbranches()])
else:
revid = None
for branch in self._bzrbranches():
try:
r = RevisionSpec.from_string(self.rev)
info = r.in_history(branch)
except errors.BzrError:
pass
revid = info.rev_id
if revid is None:
raise util.Abort(_('%s is not a valid revision') % self.rev)
heads = [revid]
# Empty repositories return 'null:', which cannot be retrieved
heads = [h for h in heads if h != 'null:']
return heads
def getfile(self, name, rev):
revtree = self.sourcerepo.revision_tree(rev)
fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
kind = None
if fileid is not None:
kind = revtree.kind(fileid)
if kind not in supportedkinds:
# the file is not available anymore - was deleted
raise IOError(_('%s is not available in %s anymore') %
(name, rev))
mode = self._modecache[(name, rev)]
if kind == 'symlink':
target = revtree.get_symlink_target(fileid)
if target is None:
raise util.Abort(_('%s.%s symlink has no target')
% (name, rev))
return target, mode
else:
sio = revtree.get_file(fileid)
return sio.read(), mode
def getchanges(self, version):
# set up caches: modecache and revtree
self._modecache = {}
self._revtree = self.sourcerepo.revision_tree(version)
# get the parentids from the cache
parentids = self._parentids.pop(version)
# only diff against first parent id
prevtree = self.sourcerepo.revision_tree(parentids[0])
return self._gettreechanges(self._revtree, prevtree)
def getcommit(self, version):
rev = self.sourcerepo.get_revision(version)
# populate parent id cache
if not rev.parent_ids:
parents = []
self._parentids[version] = (revision.NULL_REVISION,)
else:
parents = self._filterghosts(rev.parent_ids)
self._parentids[version] = parents
branch = self.recode(rev.properties.get('branch-nick', u'default'))
if branch == 'trunk':
branch = 'default'
return commit(parents=parents,
date='%d %d' % (rev.timestamp, -rev.timezone),
author=self.recode(rev.committer),
desc=self.recode(rev.message),
branch=branch,
rev=version)
def gettags(self):
bytetags = {}
for branch in self._bzrbranches():
if not branch.supports_tags():
return {}
tagdict = branch.tags.get_tag_dict()
for name, rev in tagdict.iteritems():
bytetags[self.recode(name)] = rev
return bytetags
def getchangedfiles(self, rev, i):
self._modecache = {}
curtree = self.sourcerepo.revision_tree(rev)
if i is not None:
parentid = self._parentids[rev][i]
else:
# no parent id, get the empty revision
parentid = revision.NULL_REVISION
prevtree = self.sourcerepo.revision_tree(parentid)
changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
return changes
def _gettreechanges(self, current, origin):
revid = current._revision_id
changes = []
renames = {}
seen = set()
# Process the entries by reverse lexicographic name order to
# handle nested renames correctly, most specific first.
curchanges = sorted(current.iter_changes(origin),
key=lambda c: c[1][0] or c[1][1],
reverse=True)
for (fileid, paths, changed_content, versioned, parent, name,
kind, executable) in curchanges:
if paths[0] == u'' or paths[1] == u'':
# ignore changes to tree root
continue
# bazaar tracks directories, mercurial does not, so
# we have to rename the directory contents
if kind[1] == 'directory':
if kind[0] not in (None, 'directory'):
# Replacing 'something' with a directory, record it
# so it can be removed.
changes.append((self.recode(paths[0]), revid))
if kind[0] == 'directory' and None not in paths:
renaming = paths[0] != paths[1]
# neither an add nor a delete - a move
# rename all directory contents manually
subdir = origin.inventory.path2id(paths[0])
# get all child-entries of the directory
for name, entry in origin.inventory.iter_entries(subdir):
# hg does not track directory renames
if entry.kind == 'directory':
continue
frompath = self.recode(paths[0] + '/' + name)
if frompath in seen:
# Already handled by a more specific change entry
# This is important when you have:
# a => b
# a/c => a/c
# Here a/c must not be renamed into b/c
continue
seen.add(frompath)
if not renaming:
continue
topath = self.recode(paths[1] + '/' + name)
# register the files as changed
changes.append((frompath, revid))
changes.append((topath, revid))
# add to mode cache
mode = ((entry.executable and 'x')
or (entry.kind == 'symlink' and 's')
or '')
self._modecache[(topath, revid)] = mode
# register the change as move
renames[topath] = frompath
# no further changes, go to the next change
continue
# we got unicode paths, need to convert them
path, topath = paths
if path is not None:
path = self.recode(path)
if topath is not None:
topath = self.recode(topath)
seen.add(path or topath)
if topath is None:
# file deleted
changes.append((path, revid))
continue
# renamed
if path and path != topath:
renames[topath] = path
changes.append((path, revid))
# populate the mode cache
kind, executable = [e[1] for e in (kind, executable)]
mode = ((executable and 'x') or (kind == 'symlink' and 'l')
or '')
self._modecache[(topath, revid)] = mode
changes.append((topath, revid))
return changes, renames
def _filterghosts(self, ids):
"""Filters out ghost revisions which hg does not support, see
<http://bazaar-vcs.org/GhostRevision>
"""
parentmap = self.sourcerepo.get_parent_map(ids)
parents = tuple([parent for parent in ids if parent in parentmap])
return parents
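The comment in _gettreechanges about processing entries in reverse lexicographic order can be seen concretely in a standalone sketch (the paths are invented):

```python
# Reverse lexicographic ordering puts deeper, more specific paths before
# their parents, so "a/c" is handled before "a" when both are renamed.
paths = ["a", "a/c", "b", "a/c/d"]
print(sorted(paths, reverse=True))
# ['b', 'a/c/d', 'a/c', 'a']  -- 'a/c/d' and 'a/c' come before 'a'
```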
|
mit
|
msurovcak/PerfKitBenchmarker
|
perfkitbenchmarker/packages/fio.py
|
3
|
7242
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing fio installation, cleanup, parsing functions."""
import ConfigParser
import io
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
FIO_DIR = '%s/fio' % vm_util.VM_TMP_DIR
GIT_REPO = 'http://git.kernel.dk/fio.git'
GIT_TAG = 'fio-2.1.14'
FIO_PATH = FIO_DIR + '/fio'
FIO_CMD_PREFIX = '%s --output-format=json' % FIO_PATH
SECTION_REGEX = r'\[(\w+)\]\n([\w\d\n=*$/]+)'
PARAMETER_REGEX = r'(\w+)=([/\w\d$*]+)\n'
GLOBAL = 'global'
CMD_SECTION_REGEX = r'--name=(\w+)\s+'
JOB_SECTION_REPL_REGEX = r'[\1]\n'
CMD_PARAMETER_REGEX = r'--(\w+=[/\w\d]+)\n'
CMD_PARAMETER_REPL_REGEX = r'\1\n'
CMD_STONEWALL_PARAMETER = '--stonewall'
JOB_STONEWALL_PARAMETER = 'stonewall'
def _Install(vm):
"""Installs the fio package on the VM."""
vm.Install('build_tools')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, FIO_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(FIO_DIR, GIT_TAG))
vm.RemoteCommand('cd {0} && ./configure && make'.format(FIO_DIR))
def YumInstall(vm):
"""Installs the fio package on the VM."""
vm.InstallPackages('libaio-devel libaio bc')
_Install(vm)
def AptInstall(vm):
"""Installs the fio package on the VM."""
vm.InstallPackages('libaio-dev libaio1 bc')
_Install(vm)
def ParseJobFile(job_file):
"""Parse fio job file as dictionaries of sample metadata.
Args:
job_file: The contents of fio job file.
Returns:
A dictionary keyed by job (test) name, mapping each name to a
dictionary of sample metadata for that job.
"""
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(job_file))
global_metadata = {}
if GLOBAL in config.sections():
global_metadata = dict(config.items(GLOBAL))
section_metadata = {}
for section in config.sections():
if section != GLOBAL:
metadata = {}
metadata.update(dict(config.items(section)))
metadata.update(global_metadata)
if JOB_STONEWALL_PARAMETER in metadata:
del metadata[JOB_STONEWALL_PARAMETER]
section_metadata[section] = metadata
return section_metadata
def FioParametersToJob(fio_parameters):
"""Translate fio parameters into a job config file.
Sample fio parameters:
--filesize=10g --directory=/scratch0
--name=sequential_write --overwrite=0 --rw=write
Output:
[global]
filesize=10g
directory=/scratch0
[sequential_write]
overwrite=0
rw=write
Args:
fio_parameters: string. Fio parameters in string format.
Returns:
A string representing a fio job config file.
"""
fio_parameters = fio_parameters.replace(' ', '\n')
fio_parameters = regex_util.Substitute(
CMD_SECTION_REGEX, JOB_SECTION_REPL_REGEX, fio_parameters)
fio_parameters = '[%s]\n%s' % (GLOBAL, fio_parameters)
fio_parameters = regex_util.Substitute(
CMD_PARAMETER_REGEX, CMD_PARAMETER_REPL_REGEX, fio_parameters)
return fio_parameters.replace(CMD_STONEWALL_PARAMETER,
JOB_STONEWALL_PARAMETER)
def ParseResults(job_file, fio_json_result):
"""Parse fio json output into samples.
Args:
job_file: The contents of the fio job file.
fio_json_result: Fio results in json format.
Returns:
A list of sample.Sample objects.
"""
samples = []
parameter_metadata = ParseJobFile(job_file)
io_modes = ['read', 'write', 'trim']
for job in fio_json_result['jobs']:
job_name = job['jobname']
for mode in io_modes:
if job[mode]['io_bytes']:
metric_name = '%s:%s' % (job_name, mode)
parameters = parameter_metadata[job_name]
bw_metadata = {
'bw_min': job[mode]['bw_min'],
'bw_max': job[mode]['bw_max'],
'bw_dev': job[mode]['bw_dev'],
'bw_agg': job[mode]['bw_agg'],
'bw_mean': job[mode]['bw_mean']}
bw_metadata.update(parameters)
samples.append(
sample.Sample('%s:bandwidth' % metric_name,
job[mode]['bw'],
'KB/s', bw_metadata))
# There is one sample whose metric is '<metric_name>:latency'
# with all of the latency statistics in its metadata, and then
# a bunch of samples whose metrics are
# '<metric_name>:latency:min' through
# '<metric_name>:latency:p99.99' that hold the individual
# latency numbers as values. This is for historical reasons.
clat_section = job[mode]['clat']
percentiles = clat_section['percentile']
lat_statistics = [
('min', clat_section['min']),
('max', clat_section['max']),
('mean', clat_section['mean']),
('stddev', clat_section['stddev']),
('p1', percentiles['1.000000']),
('p5', percentiles['5.000000']),
('p10', percentiles['10.000000']),
('p20', percentiles['20.000000']),
('p30', percentiles['30.000000']),
('p40', percentiles['40.000000']),
('p50', percentiles['50.000000']),
('p60', percentiles['60.000000']),
('p70', percentiles['70.000000']),
('p80', percentiles['80.000000']),
('p90', percentiles['90.000000']),
('p95', percentiles['95.000000']),
('p99', percentiles['99.000000']),
('p99.5', percentiles['99.500000']),
('p99.9', percentiles['99.900000']),
('p99.95', percentiles['99.950000']),
('p99.99', percentiles['99.990000'])]
lat_metadata = parameters.copy()
for name, val in lat_statistics:
lat_metadata[name] = val
samples.append(
sample.Sample('%s:latency' % metric_name,
job[mode]['clat']['mean'],
'usec', lat_metadata))
for stat_name, stat_val in lat_statistics:
samples.append(
sample.Sample('%s:latency:%s' % (metric_name, stat_name),
stat_val, 'usec', parameters))
samples.append(
sample.Sample('%s:iops' % metric_name,
job[mode]['iops'], '', parameters))
return samples
def DeleteParameterFromJobFile(job_file, parameter):
"""Delete all occurance of parameter from job_file.
Args:
job_file: The contents of the fio job file.
parameter: The parameter to be deleted from the job file.
Returns:
A string representing a fio job file after removing parameter.
"""
try:
return regex_util.Substitute(r'%s=[\w\d_/]+\n' % parameter, '', job_file)
except regex_util.NoMatchError:
return job_file
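As a standalone illustration of the job-file shape produced by FioParametersToJob, the sample from its docstring can be parsed the same way ParseJobFile does. This sketch uses Python 3's configparser rather than the module's Python 2 ConfigParser, purely to keep it self-contained:

```python
import configparser
import io

# The job file from the FioParametersToJob docstring above.
job_file = """\
[global]
filesize=10g
directory=/scratch0
[sequential_write]
overwrite=0
rw=write
"""

config = configparser.ConfigParser(allow_no_value=True)
config.read_file(io.StringIO(job_file))

# Mirror ParseJobFile: merge [global] options into each job's metadata.
global_metadata = dict(config.items("global"))
section_metadata = {}
for section in config.sections():
    if section != "global":
        metadata = dict(config.items(section))
        metadata.update(global_metadata)
        section_metadata[section] = metadata

print(section_metadata)
# {'sequential_write': {'overwrite': '0', 'rw': 'write',
#                       'filesize': '10g', 'directory': '/scratch0'}}
```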
|
apache-2.0
|
genenetwork/genenetwork2_diet
|
wqflask/utility/Plot.py
|
2
|
45616
|
# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# This program is available from Source Forge: at GeneNetwork Project
# (sourceforge.net/projects/genenetwork/).
#
# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
# at [email protected] and [email protected]
#
#
#
# This module is used by GeneNetwork project (www.genenetwork.org)
#
# Created by GeneNetwork Core Team 2010/08/10
#
# Last updated by GeneNetwork Core Team 2010/10/20
#import piddle as pid
from __future__ import print_function
from pprint import pformat as pf
print("Lysol")
from math import *
import random
import sys, os
from numarray import linear_algebra as la
from numarray import ones, array, dot, swapaxes
import reaper
sys.path.append("..")
print(sys.path)
from basicStatistics import corestats
import svg
import webqtlUtil
from base import webqtlConfig
def cformat(d, rank=0):
'custom string format'
strD = "%2.6f" % d
if rank == 0:
while strD[-1] in ('0','.'):
if strD[-1] == '0' and strD[-2] == '.' and len(strD) <= 4:
break
elif strD[-1] == '.':
strD = strD[:-1]
break
else:
strD = strD[:-1]
else:
strD = strD.split(".")[0]
if strD == '-0.0':
strD = '0.0'
return strD
def frange(start, end=None, inc=1.0):
"A faster range-like function that does accept float increments..."
if end == None:
end = start + 0.0
start = 0.0
else:
start += 0.0 # force it to be a float
count = int((end - start) / inc)
if start + count * inc != end:
# Need to adjust the count. AFAICT, it always comes up one short.
count += 1
L = [start] * count
for i in xrange(1, count):
L[i] = start + i * inc
return L
def gammln(xx):
cof=[76.18009173,-86.50532033,24.01409822,-1.231739516,0.120858003e-2,-0.536382e-5]
x=xx-1.0
tmp=x+5.5
tmp -=(x+0.5)*log(tmp)
ser=1.0
for item in cof:
x+=1.0
ser+=item/x
return -tmp+log(2.50662827465*ser)
def gser(a,x):
gln=gammln(a)
ITMAX=100
EPS=3.0e-7
if x<=0.0:
gamser=0.0
return [gamser,gln]
else:
ap=a
sum=1.0/a
dele=sum
for i in range(1,ITMAX+1):
ap+=1.0
dele*=x/ap
sum+=dele
if abs(dele)<abs(sum)*EPS:
gamser=sum*exp(-x+a*log(x)-gln)
return [gamser,gln]
return None
def gcf(a,x):
ITMAX=100
EPS=3.0e-7
gold=0.0
fac=1
b1=1.0
b0=0.0
a0=1.0
gln=gammln(a)
a1=x
for n in range(1,ITMAX+1):
an=n+0.0
ana=an-a
a0=(a1+a0*ana)*fac
b0=(b1+b0*ana)*fac
anf=an*fac
a1=x*a0+anf*a1
b1=x*b0+anf*b1
if (a1):
fac=1.0/a1
g=b1*fac
if abs((g-gold)/g)<EPS:
gammcf=exp(-x+a*log(x)-gln)*g
return [gammcf,gln]
gold=g
return None
def gammp(a,x):
if x<0.0 or a<=0.0:
return None
if x<(a+1.0):
a=gser(a,x)[0]
return a
else:
a=gcf(a,x)[0]
return 1.0-a
def U(n):
x=pow(0.5,1.0/n)
m=[1-x]
for i in range(2,n):
a=(i-0.3175)/(n+0.365)
m.append(a)
m.append(x)
return m
def erf(x):
if x<0.0:
return -gammp(0.5,x*x)
else:
return gammp(0.5,x*x)
def erfcc(x):
z=abs(x)
t=1.0/(1.0+0.5*z)
ans=t*exp(-z*z-1.26551223+t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
if x>=0.0:
return ans
else:
return 2.0-ans
def calMeanVar(data):
n=len(data)
if n<2:
return None
else:
sum=reduce(lambda x,y:x+y,data,0.0)
mean=sum/n
z=data[:]
for i in range(n):
z[i]=z[i]-mean
variance=reduce(lambda x,y:x+y*y,z,0.0)
variance /= n-1
variance =sqrt(variance)
for i in range(n):
z[i]=z[i]/variance
return z
def inverseCumul(p):
#Coefficients in rational approximations.
a = [-3.969683028665376e+01,2.209460984245205e+02,-2.759285104469687e+02,1.383577518672690e+02,-3.066479806614716e+01,2.506628277459239e+00]
b = [-5.447609879822406e+01,1.615858368580409e+02,-1.556989798598866e+02,6.680131188771972e+01,-1.328068155288572e+01]
c = [-7.784894002430293e-03,-3.223964580411365e-01,-2.400758277161838e+00,-2.549732539343734e+00,4.374664141464968e+00,2.938163982698783e+00]
d = [7.784695709041462e-03,3.224671290700398e-01,2.445134137142996e+00,3.754408661907416e+00]
#Define break-points.
p_low = 0.02425
p_high = 1 - p_low
#Rational approximation for lower region.
if p > 0 and p < p_low:
q = sqrt(-2*log(p))
x = (((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / ((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)
#Rational approximation for central region.
elif p>= p_low and p <= p_high:
q = p - 0.5
r = q*q
x = (((((a[0]*r+a[1])*r+a[2])*r+a[3])*r+a[4])*r+a[5])*q /(((((b[0]*r+b[1])*r+b[2])*r+b[3])*r+b[4])*r+1)
#Rational approximation for upper region.
elif p>p_high and p < 1:
q = sqrt(-2*log(1-p))
x = -(((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) /((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)
else:
return None
if p>0 and p < 1:
e = 0.5 * erfcc(-x/sqrt(2)) - p
u = e * sqrt(2*pi) * exp(x*x/2)
x = x - u/(1 + x*u/2)
return x
else:
return None
def gmean(lst):
N = len(lst)
if N == 0:
return 0
else:
return (reduce(lambda x,y: x+y, lst, 0.0))/N
def gmedian(lst2):
lst = lst2[:]
N = len(lst)
if N == 0:
return 0
else:
lst.sort()
if N % 2 == 0:
return (lst[N/2]+lst[(N-2)/2])/2.0
else:
return lst[(N-1)/2]
def gpercentile(lst2, np):
"""Obsolete - use percentile in corestats instead"""
lst = lst2[:]
N = len(lst)
if N == 0 or np > 100 or np < 0:
return None
else:
lst.sort()
pNadd1 = (np/100.0)*N
k = int(pNadd1)
d = pNadd1 - k
if k == 0:
return lst[0]
elif k >= N-1:
return lst[N-1]
else:
return lst[k-1] + d*(lst[k] - lst[k-1])
def find_outliers(vals):
"""Calculates the upper and lower bounds of a set of sample/case values
>>> find_outliers([3.504, 5.234, 6.123, 7.234, 3.542, 5.341, 7.852, 4.555, 12.537])
(11.252500000000001, 0.5364999999999993)
>>> find_outliers([9,12,15,17,31,50,7,5,6,8])
(32.0, -8.0)
If there are no vals, returns None for the upper and lower bounds,
which code that calls it will have to deal with.
>>> find_outliers([])
(None, None)
"""
print("xerxes vals is:", pf(vals))
if vals:
#print("vals is:", pf(vals))
stats = corestats.Stats(vals)
low_hinge = stats.percentile(25)
up_hinge = stats.percentile(75)
hstep = 1.5 * (up_hinge - low_hinge)
upper_bound = up_hinge + hstep
lower_bound = low_hinge - hstep
else:
upper_bound = None
lower_bound = None
print(pf(locals()))
return upper_bound, lower_bound
def plotBoxPlot(canvas, data, offset= (40, 40, 40, 40), XLabel="Category", YLabel="Value"):
xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
plotWidth = canvas.size[0] - xLeftOffset - xRightOffset
plotHeight = canvas.size[1] - yTopOffset - yBottomOffset
iValues = []
for item in data:
for item2 in item[1]:
try:
iValues.append(item2[1])
except:
iValues.append(item2)
#draw frame
max_Y = max(iValues)
min_Y = min(iValues)
scaleY = detScale(min_Y, max_Y)
Yll = scaleY[0]
Yur = scaleY[1]
nStep = scaleY[2]
stepY = (Yur - Yll)/nStep
stepYPixel = plotHeight/(nStep)
canvas.drawRect(plotWidth+xLeftOffset, plotHeight + yTopOffset, xLeftOffset, yTopOffset)
##draw Y Scale
YYY = Yll
YCoord = plotHeight + yTopOffset
scaleFont=pid.Font(ttf="cour",size=11,bold=1)
for i in range(nStep+1):
strY = cformat(d=YYY, rank=0)
YCoord = max(YCoord, yTopOffset)
canvas.drawLine(xLeftOffset,YCoord,xLeftOffset-5,YCoord)
canvas.drawString(strY, xLeftOffset -30,YCoord +5,font=scaleFont)
YYY += stepY
YCoord -= stepYPixel
##draw X Scale
stepX = plotWidth/len(data)
XCoord = xLeftOffset + 0.5*stepX
YCoord = plotHeight + yTopOffset
scaleFont = pid.Font(ttf="tahoma",size=12,bold=0)
labelFont = pid.Font(ttf="tahoma",size=13,bold=0)
for item in data:
itemname, itemvalue = item
canvas.drawLine(XCoord, YCoord,XCoord, YCoord+5, color=pid.black)
canvas.drawString(itemname, XCoord - canvas.stringWidth(itemname,font=labelFont)/2.0,\
YCoord +20,font=labelFont)
nValue = len(itemvalue)
catValue = []
for item2 in itemvalue:
try:
tstrain, tvalue = item2
except:
tvalue = item2
if nValue <= 4:
canvas.drawCross(XCoord, plotHeight + yTopOffset - (tvalue-Yll)*plotHeight/(Yur - Yll), color=pid.red,size=5)
else:
catValue.append(tvalue)
if catValue != []:
catMean = gmean(catValue)
catMedian = gmedian(catValue)
lowHinge = gpercentile(catValue, 25)
upHinge = gpercentile(catValue, 75)
Hstep = 1.5*(upHinge - lowHinge)
outlier = []
extrem = []
upperAdj = None
for item in catValue:
if item >= upHinge + 2*Hstep:
extrem.append(item)
elif item >= upHinge + Hstep:
outlier.append(item)
elif item > upHinge and item < upHinge + Hstep:
if upperAdj == None or item > upperAdj:
upperAdj = item
else:
pass
lowerAdj = None
for item in catValue:
if item <= lowHinge - 2*Hstep:
extrem.append(item)
elif item <= lowHinge - Hstep:
outlier.append(item)
if item < lowHinge and item > lowHinge - Hstep:
if lowerAdj == None or item < lowerAdj:
lowerAdj = item
else:
pass
canvas.drawRect(XCoord-20, plotHeight + yTopOffset - (lowHinge-Yll)*plotHeight/(Yur - Yll), \
XCoord+20, plotHeight + yTopOffset - (upHinge-Yll)*plotHeight/(Yur - Yll))
canvas.drawLine(XCoord-20, plotHeight + yTopOffset - (catMedian-Yll)*plotHeight/(Yur - Yll), \
XCoord+20, plotHeight + yTopOffset - (catMedian-Yll)*plotHeight/(Yur - Yll))
if upperAdj != None:
canvas.drawLine(XCoord, plotHeight + yTopOffset - (upHinge-Yll)*plotHeight/(Yur - Yll), \
XCoord, plotHeight + yTopOffset - (upperAdj-Yll)*plotHeight/(Yur - Yll))
canvas.drawLine(XCoord-20, plotHeight + yTopOffset - (upperAdj-Yll)*plotHeight/(Yur - Yll), \
XCoord+20, plotHeight + yTopOffset - (upperAdj-Yll)*plotHeight/(Yur - Yll))
if lowerAdj != None:
canvas.drawLine(XCoord, plotHeight + yTopOffset - (lowHinge-Yll)*plotHeight/(Yur - Yll), \
XCoord, plotHeight + yTopOffset - (lowerAdj-Yll)*plotHeight/(Yur - Yll))
canvas.drawLine(XCoord-20, plotHeight + yTopOffset - (lowerAdj-Yll)*plotHeight/(Yur - Yll), \
XCoord+20, plotHeight + yTopOffset - (lowerAdj-Yll)*plotHeight/(Yur - Yll))
outlierFont = pid.Font(ttf="cour",size=12,bold=0)
if outlier != []:
for item in outlier:
yc = plotHeight + yTopOffset - (item-Yll)*plotHeight/(Yur - Yll)
#canvas.drawEllipse(XCoord-3, yc-3, XCoord+3, yc+3)
canvas.drawString('o', XCoord-3, yc+5, font=outlierFont, color=pid.orange)
if extrem != []:
for item in extrem:
yc = plotHeight + yTopOffset - (item-Yll)*plotHeight/(Yur - Yll)
#canvas.drawEllipse(XCoord-3, yc-3, XCoord+3, yc+3)
canvas.drawString('*', XCoord-3, yc+6, font=outlierFont, color=pid.red)
canvas.drawCross(XCoord, plotHeight + yTopOffset - (catMean-Yll)*plotHeight/(Yur - Yll), \
color=pid.blue,size=3)
#print (catMean, catMedian, cat25per, cat75per)
pass
XCoord += stepX
labelFont=pid.Font(ttf="verdana",size=18,bold=0)
canvas.drawString(XLabel, xLeftOffset + (plotWidth -canvas.stringWidth(XLabel,font=labelFont))/2.0, \
YCoord +40, font=labelFont)
canvas.drawString(YLabel,xLeftOffset-40, YCoord-(plotHeight -canvas.stringWidth(YLabel,font=labelFont))/2.0,\
font=labelFont, angle =90)
def plotSecurity(canvas, text="12345"):
if not text:
return
plotWidth = canvas.size[0]
plotHeight = canvas.size[1]
if plotHeight<=0 or plotWidth<=0:
return
bgColor = pid.Color(0.6+0.4*random.random(), 0.6+0.4*random.random(), 0.6+0.4*random.random())
canvas.drawRect(0,0,plotWidth,plotHeight, edgeColor=bgColor, fillColor=bgColor)
for i in range(30):
randomColor = pid.Color(0.6+0.4*random.random(), 0.6+0.4*random.random(), 0.6+0.4*random.random())
scaleFont=pid.Font(ttf="cour",size=random.choice(range(20, 50)))
canvas.drawString(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'),
int(random.random()*plotWidth), int(random.random()*plotHeight), font=scaleFont,
color=randomColor, angle=random.choice(range(-45, 50)))
step = (plotWidth-20)/len(text)
startX = 20
for item in text:
randomColor = pid.Color(0.6*random.random(),0.6*random.random(), 0.6*random.random())
scaleFont=pid.Font(ttf="verdana",size=random.choice(range(50, 60)),bold=1)
canvas.drawString(item, startX, plotHeight/2-10, font=scaleFont,
color=randomColor, angle=random.choice(range(-45, 50)))
startX += step
# parameter: data is either object returned by reaper permutation function (called by MarkerRegressionPage.py)
# or the first object returned by direct (pair-scan) permu function (called by DirectPlotPage.py)
#def plotBar(canvas, data, barColor=pid.blue, axesColor=pid.black, labelColor=pid.black, XLabel=None, YLabel=None, title=None, offset= (60, 20, 40, 40), zoom = 1):
#
# xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
#
# plotWidth = canvas.size[0] - xLeftOffset - xRightOffset
# plotHeight = canvas.size[1] - yTopOffset - yBottomOffset
# if plotHeight<=0 or plotWidth<=0:
# return
#
# if len(data) < 2:
# return
#
# max_D = max(data)
# min_D = min(data)
# #add by NL 06-20-2011: fix the error: when max_D is infinite, log function in detScale will go wrong
# if max_D == float('inf') or max_D>webqtlConfig.MAXLRS:
# max_D=webqtlConfig.MAXLRS #maximum LRS value
#
# xLow, xTop, stepX = detScale(min_D, max_D)
#
# #reduce data
# step = ceil((xTop-xLow)/50.0)
# j = xLow
# dataXY = []
# Count = []
# while j <= xTop:
# dataXY.append(j)
# Count.append(0)
# j += step
#
# for i, item in enumerate(data):
# if item == float('inf') or item>webqtlConfig.MAXLRS:
# item = webqtlConfig.MAXLRS #maximum LRS value
# j = int((item-xLow)/step)
# Count[j] += 1
#
# yLow, yTop, stepY=detScale(0,max(Count))
#
# #draw data
# xScale = plotWidth/(xTop-xLow)
# yScale = plotHeight/(yTop-yLow)
# barWidth = xScale*step
#
# for i, count in enumerate(Count):
# if count:
# xc = (dataXY[i]-xLow)*xScale+xLeftOffset
# yc =-(count-yLow)*yScale+yTopOffset+plotHeight
# canvas.drawRect(xc+2,yc,xc+barWidth-2,yTopOffset+plotHeight,edgeColor=barColor,fillColor=barColor)
#
# #draw drawing region
# canvas.drawRect(xLeftOffset, yTopOffset, xLeftOffset+plotWidth, yTopOffset+plotHeight)
#
# #draw scale
# scaleFont=pid.Font(ttf="cour",size=11,bold=1)
# x=xLow
# for i in range(stepX+1):
# xc=xLeftOffset+(x-xLow)*xScale
# canvas.drawLine(xc,yTopOffset+plotHeight,xc,yTopOffset+plotHeight+5, color=axesColor)
# strX = cformat(d=x, rank=0)
# canvas.drawString(strX,xc-canvas.stringWidth(strX,font=scaleFont)/2,yTopOffset+plotHeight+14,font=scaleFont)
# x+= (xTop - xLow)/stepX
#
# y=yLow
# for i in range(stepY+1):
# yc=yTopOffset+plotHeight-(y-yLow)*yScale
# canvas.drawLine(xLeftOffset,yc,xLeftOffset-5,yc, color=axesColor)
# strY = "%d" %y
# canvas.drawString(strY,xLeftOffset-canvas.stringWidth(strY,font=scaleFont)-6,yc+5,font=scaleFont)
# y+= (yTop - yLow)/stepY
#
# #draw label
# labelFont=pid.Font(ttf="tahoma",size=17,bold=0)
# if XLabel:
# canvas.drawString(XLabel,xLeftOffset+(plotWidth-canvas.stringWidth(XLabel,font=labelFont))/2.0,
# yTopOffset+plotHeight+yBottomOffset-10,font=labelFont,color=labelColor)
#
# if YLabel:
# canvas.drawString(YLabel, 19, yTopOffset+plotHeight-(plotHeight-canvas.stringWidth(YLabel,font=labelFont))/2.0,
# font=labelFont,color=labelColor,angle=90)
#
# labelFont=pid.Font(ttf="verdana",size=16,bold=0)
# if title:
# canvas.drawString(title,xLeftOffset+(plotWidth-canvas.stringWidth(title,font=labelFont))/2.0,
# 20,font=labelFont,color=labelColor)
#
#def plotBarText(canvas, data, label, variance=None, barColor=pid.blue, axesColor=pid.black, labelColor=pid.black, XLabel=None, YLabel=None, title=None, sLabel = None, offset= (80, 20, 40, 100), barSpace = 2, zoom = 1):
# xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
# plotWidth = canvas.size[0] - xLeftOffset - xRightOffset
# plotHeight = canvas.size[1] - yTopOffset - yBottomOffset
# if plotHeight<=0 or plotWidth<=0:
# return
#
# NNN = len(data)
# if NNN < 2 or NNN != len(label):
# return
# if variance and len(variance)!=NNN:
# variance = []
#
# Y2 = data[:]
# if variance:
# for i in range(NNN):
# if variance[i]:
# Y2 += [data[i]-variance[i]]
#
# #Y axis
# YLow, YTop, stepY = detScale(min(Y2), max(Y2))
# YScale = plotHeight/(YTop - YLow)
#
# if YLow < 0 and YTop > 0:
# drawZero = 1
# else:
# drawZero = 0
#
# #X axis
# X = range(NNN)
# Xll= 0
# Xur= NNN-1
#
#
# if drawZero:
# YZero = yTopOffset+plotHeight-YScale*(0-YLow)
# canvas.drawLine(xLeftOffset, YZero, xLeftOffset+plotWidth, YZero)
# else:
# YZero = yTopOffset+plotHeight
# #draw data
# spaceWidth = barSpace
# if spaceWidth < 1:
# spaceWidth = 1
# barWidth = int((plotWidth - (NNN-1.0)*spaceWidth)/NNN)
#
# xc= xLeftOffset
# scaleFont=pid.Font(ttf="verdana",size=11,bold=0)
# for i in range(NNN):
# yc = yTopOffset+plotHeight-(data[i]-YLow)*YScale
# canvas.drawRect(xc,YZero,xc+barWidth-1, yc, edgeColor=barColor,fillColor=barColor)
# if variance and variance[i]:
# varlen = variance[i]*YScale
# if yc-varlen < yTopOffset:
# topYd = yTopOffset
# else:
# topYd = yc-varlen
# canvas.drawLine(xc+barWidth/2-2,yc-varlen,xc+barWidth/2+2,yc-varlen,color=pid.red)
# canvas.drawLine(xc+barWidth/2,yc+varlen,xc+barWidth/2,topYd,color=pid.red)
# canvas.drawLine(xc+barWidth/2-2,yc+varlen,xc+barWidth/2+2,yc+varlen,color=pid.red)
# strX = label[i]
# canvas.drawString(strX,xc+barWidth/2.0+2,yTopOffset+plotHeight+2+canvas.stringWidth(strX,font=scaleFont),font=scaleFont,angle=90)
# xc += barWidth + spaceWidth
#
# #draw drawing region
# canvas.drawRect(xLeftOffset, yTopOffset, xLeftOffset+plotWidth, yTopOffset+plotHeight)
#
# #draw Y scale
# scaleFont=pid.Font(ttf="cour",size=16,bold=1)
# y=YLow
# for i in range(stepY+1):
# yc=yTopOffset+plotHeight-(y-YLow)*YScale
# canvas.drawLine(xLeftOffset,yc,xLeftOffset-5,yc, color=axesColor)
# strY = cformat(d=y, rank=0)
# canvas.drawString(strY,xLeftOffset-canvas.stringWidth(strY,font=scaleFont)-6,yc+5,font=scaleFont)
# y+= (YTop - YLow)/stepY
#
# #draw label
# labelFont=pid.Font(ttf="verdana",size=17,bold=0)
# if XLabel:
# canvas.drawString(XLabel,xLeftOffset+(plotWidth-canvas.stringWidth(XLabel,font=labelFont))/2.0,yTopOffset+plotHeight+65,font=labelFont,color=labelColor)
#
# if YLabel:
# canvas.drawString(YLabel,xLeftOffset-50, yTopOffset+plotHeight-(plotHeight-canvas.stringWidth(YLabel,font=labelFont))/2.0,font=labelFont,color=labelColor,angle=90)
#
# labelFont=pid.Font(ttf="verdana",size=18,bold=0)
# if title:
# canvas.drawString(title,xLeftOffset,yTopOffset-15,font=labelFont,color=labelColor)
#
# return
#
#def plotXY(canvas, dataX, dataY, rank=0, dataLabel=[], plotColor = pid.black, axesColor=pid.black, labelColor=pid.black, lineSize="thin", lineColor=pid.grey, idFont="arial", idColor=pid.blue, idSize="14", symbolColor=pid.black, symbolType="circle", filled="yes", symbolSize="tiny", XLabel=None, YLabel=None, title=None, fitcurve=None, connectdot=1, displayR=None, loadingPlot = 0, offset= (80, 20, 40, 60), zoom = 1, specialCases=[], showLabel = 1, bufferSpace = 15):
# 'displayR : correlation scatter plot, loadings : loading plot'
#
# dataXRanked, dataYRanked = webqtlUtil.calRank(dataX, dataY, len(dataX))
#
# #get ID font size
# idFontSize = int(idSize)
#
# #If filled is yes, set fill color
# if filled == "yes":
# fillColor = symbolColor
# else:
# fillColor = None
#
# if symbolSize == "large":
# sizeModifier = 7
# fontModifier = 12
# elif symbolSize == "medium":
# sizeModifier = 5
# fontModifier = 8
# elif symbolSize == "small":
# sizeModifier = 3
# fontModifier = 3
# else:
# sizeModifier = 1
# fontModifier = -1
#
# if rank == 0: # Pearson correlation
# bufferSpace = 0
# dataXPrimary = dataX
# dataYPrimary = dataY
# dataXAlt = dataXRanked #Values used just for printing the other corr type to the graph image
# dataYAlt = dataYRanked #Values used just for printing the other corr type to the graph image
# else: # Spearman correlation: Switching Ranked and Unranked X and Y values
# dataXPrimary = dataXRanked
# dataYPrimary = dataYRanked
# dataXAlt = dataX #Values used just for printing the other corr type to the graph image
# dataYAlt = dataY #Values used just for printing the other corr type to the graph image
#
# xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
# plotWidth = canvas.size[0] - xLeftOffset - xRightOffset
# plotHeight = canvas.size[1] - yTopOffset - yBottomOffset
# if plotHeight<=0 or plotWidth<=0:
# return
# if len(dataXPrimary) < 1 or len(dataXPrimary) != len(dataYPrimary) or (dataLabel and len(dataXPrimary) != len(dataLabel)):
# return
#
# max_X=max(dataXPrimary)
# min_X=min(dataXPrimary)
# max_Y=max(dataYPrimary)
# min_Y=min(dataYPrimary)
#
# #for some reason I forgot why I need to do this
# if loadingPlot:
# min_X = min(-0.1,min_X)
# max_X = max(0.1,max_X)
# min_Y = min(-0.1,min_Y)
# max_Y = max(0.1,max_Y)
#
# xLow, xTop, stepX=detScale(min_X,max_X)
# yLow, yTop, stepY=detScale(min_Y,max_Y)
# xScale = plotWidth/(xTop-xLow)
# yScale = plotHeight/(yTop-yLow)
#
# #draw drawing region
# canvas.drawRect(xLeftOffset-bufferSpace, yTopOffset, xLeftOffset+plotWidth, yTopOffset+plotHeight+bufferSpace)
# canvas.drawRect(xLeftOffset-bufferSpace+1, yTopOffset, xLeftOffset+plotWidth, yTopOffset+plotHeight+bufferSpace-1)
#
# #calculate data points
# data = map(lambda X, Y: (X, Y), dataXPrimary, dataYPrimary)
# xCoord = map(lambda X, Y: ((X-xLow)*xScale + xLeftOffset, yTopOffset+plotHeight-(Y-yLow)*yScale), dataXPrimary, dataYPrimary)
#
# labelFont=pid.Font(ttf=idFont,size=idFontSize,bold=0)
#
# if loadingPlot:
# xZero = -xLow*xScale+xLeftOffset
# yZero = yTopOffset+plotHeight+yLow*yScale
# for point in xCoord:
# canvas.drawLine(xZero,yZero,point[0],point[1],color=pid.red)
# else:
# if connectdot:
# canvas.drawPolygon(xCoord,edgeColor=plotColor,closed=0)
# else:
# pass
#
# symbolFont = pid.Font(ttf="fnt_bs", size=12+fontModifier,bold=0)
#
# for i, item in enumerate(xCoord):
# if dataLabel and dataLabel[i] in specialCases:
# canvas.drawRect(item[0]-3, item[1]-3, item[0]+3, item[1]+3, edgeColor=pid.green)
# #canvas.drawCross(item[0],item[1],color=pid.blue,size=5)
# else:
# if symbolType == "vertRect":
# canvas.drawRect(x1=item[0]-sizeModifier+2,y1=item[1]-sizeModifier-2, x2=item[0]+sizeModifier-1,y2=item[1]+sizeModifier+2, edgeColor=symbolColor, edgeWidth=1, fillColor=fillColor)
# elif (symbolType == "circle" and filled != "yes"):
# canvas.drawString(":", item[0]-canvas.stringWidth(":",font=symbolFont)/2+1,item[1]+2,color=symbolColor, font=symbolFont)
# elif (symbolType == "circle" and filled == "yes"):
# canvas.drawString("5", item[0]-canvas.stringWidth("5",font=symbolFont)/2+1,item[1]+2,color=symbolColor, font=symbolFont)
# elif symbolType == "horiRect":
# canvas.drawRect(x1=item[0]-sizeModifier-1,y1=item[1]-sizeModifier+3, x2=item[0]+sizeModifier+3,y2=item[1]+sizeModifier-2, edgeColor=symbolColor, edgeWidth=1, fillColor=fillColor)
# elif (symbolType == "square"):
# canvas.drawRect(x1=item[0]-sizeModifier+1,y1=item[1]-sizeModifier-4, x2=item[0]+sizeModifier+2,y2=item[1]+sizeModifier-3, edgeColor=symbolColor, edgeWidth=1, fillColor=fillColor)
# elif (symbolType == "diamond" and filled != "yes"):
# canvas.drawString(",", item[0]-canvas.stringWidth(",",font=symbolFont)/2+2, item[1]+6, font=symbolFont, color=symbolColor)
# elif (symbolType == "diamond" and filled == "yes"):
# canvas.drawString("D", item[0]-canvas.stringWidth("D",font=symbolFont)/2+2, item[1]+6, font=symbolFont, color=symbolColor)
# elif symbolType == "4-star":
# canvas.drawString("l", item[0]-canvas.stringWidth("l",font=symbolFont)/2+1, item[1]+3, font=symbolFont, color=symbolColor)
# elif symbolType == "3-star":
# canvas.drawString("k", item[0]-canvas.stringWidth("k",font=symbolFont)/2+1, item[1]+3, font=symbolFont, color=symbolColor)
# else:
# canvas.drawCross(item[0],item[1]-2,color=symbolColor, size=sizeModifier+2)
#
# if showLabel and dataLabel:
# if (symbolType == "vertRect" or symbolType == "diamond"):
# labelGap = 15
# elif (symbolType == "4-star" or symbolType == "3-star"):
# labelGap = 12
# else:
# labelGap = 11
# canvas.drawString(dataLabel[i], item[0]- canvas.stringWidth(dataLabel[i],
# font=labelFont)/2 + 1, item[1]+(labelGap+sizeModifier+(idFontSize-12)), font=labelFont, color=idColor)
#
# #draw scale
# scaleFont=pid.Font(ttf="cour",size=16,bold=1)
#
#
# x=xLow
# for i in range(stepX+1):
# xc=xLeftOffset+(x-xLow)*xScale
# if ((x == 0) & (rank == 1)):
# pass
# else:
# canvas.drawLine(xc,yTopOffset+plotHeight + bufferSpace,xc,yTopOffset+plotHeight+5 + bufferSpace, color=axesColor)
# strX = cformat(d=x, rank=rank)
# if ((strX == "0") & (rank == 1)):
# pass
# else:
# canvas.drawString(strX,xc-canvas.stringWidth(strX,font=scaleFont)/2,yTopOffset+plotHeight+20 + bufferSpace,font=scaleFont)
# x+= (xTop - xLow)/stepX
#
# y=yLow
# for i in range(stepY+1):
# yc=yTopOffset+plotHeight-(y-yLow)*yScale
# if ((y == 0) & (rank == 1)):
# pass
# else:
# canvas.drawLine(xLeftOffset - bufferSpace,yc,xLeftOffset-5 - bufferSpace,yc, color=axesColor)
# strY = cformat(d=y, rank=rank)
# if ((strY == "0") & (rank == 1)):
# pass
# else:
# canvas.drawString(strY,xLeftOffset-canvas.stringWidth(strY,font=scaleFont)- 10 - bufferSpace,yc+4,font=scaleFont)
# y+= (yTop - yLow)/stepY
#
# #draw label
#
# labelFont=pid.Font(ttf="verdana",size=canvas.size[0]/45,bold=0)
# titleFont=pid.Font(ttf="verdana",size=canvas.size[0]/40,bold=0)
#
# if (rank == 1 and not title):
# canvas.drawString("Spearman Rank Correlation", xLeftOffset-canvas.size[0]*.025+(plotWidth-canvas.stringWidth("Spearman Rank Correlation",font=titleFont))/2.0,
# 25,font=titleFont,color=labelColor)
# elif (rank == 0 and not title):
# canvas.drawString("Pearson Correlation", xLeftOffset-canvas.size[0]*.025+(plotWidth-canvas.stringWidth("Pearson Correlation",font=titleFont))/2.0,
# 25,font=titleFont,color=labelColor)
#
# if XLabel:
# canvas.drawString(XLabel,xLeftOffset+(plotWidth-canvas.stringWidth(XLabel,font=labelFont))/2.0,
# yTopOffset+plotHeight+yBottomOffset-25,font=labelFont,color=labelColor)
#
# if YLabel:
# canvas.drawString(YLabel, xLeftOffset-65, yTopOffset+plotHeight- (plotHeight-canvas.stringWidth(YLabel,font=labelFont))/2.0,
# font=labelFont,color=labelColor,angle=90)
#
# labelFont=pid.Font(ttf="verdana",size=20,bold=0)
# if title:
# canvas.drawString(title,xLeftOffset+(plotWidth-canvas.stringWidth(title,font=labelFont))/2.0,
# 20,font=labelFont,color=labelColor)
#
# if fitcurve:
# import sys
# sys.argv = [ "mod_python" ]
# #from numarray import linear_algebra as la
# #from numarray import ones, array, dot, swapaxes
# fitYY = array(dataYPrimary)
# fitXX = array([ones(len(dataXPrimary)),dataXPrimary])
# AA = dot(fitXX,swapaxes(fitXX,0,1))
# BB = dot(fitXX,fitYY)
# bb = la.linear_least_squares(AA,BB)[0]
#
# xc1 = xLeftOffset
# yc1 = yTopOffset+plotHeight-(bb[0]+bb[1]*xLow-yLow)*yScale
# if yc1 > yTopOffset+plotHeight:
# yc1 = yTopOffset+plotHeight
# xc1 = (yLow-bb[0])/bb[1]
# xc1=(xc1-xLow)*xScale+xLeftOffset
# elif yc1 < yTopOffset:
# yc1 = yTopOffset
# xc1 = (yTop-bb[0])/bb[1]
# xc1=(xc1-xLow)*xScale+xLeftOffset
# else:
# pass
#
# xc2 = xLeftOffset + plotWidth
# yc2 = yTopOffset+plotHeight-(bb[0]+bb[1]*xTop-yLow)*yScale
# if yc2 > yTopOffset+plotHeight:
# yc2 = yTopOffset+plotHeight
# xc2 = (yLow-bb[0])/bb[1]
# xc2=(xc2-xLow)*xScale+xLeftOffset
# elif yc2 < yTopOffset:
# yc2 = yTopOffset
# xc2 = (yTop-bb[0])/bb[1]
# xc2=(xc2-xLow)*xScale+xLeftOffset
# else:
# pass
#
# canvas.drawLine(xc1 - bufferSpace,yc1 + bufferSpace,xc2,yc2,color=lineColor)
# if lineSize == "medium":
# canvas.drawLine(xc1 - bufferSpace,yc1 + bufferSpace+1,xc2,yc2+1,color=lineColor)
# if lineSize == "thick":
# canvas.drawLine(xc1 - bufferSpace,yc1 + bufferSpace+1,xc2,yc2+1,color=lineColor)
# canvas.drawLine(xc1 - bufferSpace,yc1 + bufferSpace-1,xc2,yc2-1,color=lineColor)
#
#
# if displayR:
# labelFont=pid.Font(ttf="trebuc",size=canvas.size[0]/60,bold=0)
# NNN = len(dataX)
# corr = webqtlUtil.calCorrelation(dataXPrimary,dataYPrimary,NNN)[0]
#
# if NNN < 3:
# corrPValue = 1.0
# else:
# if abs(corr) >= 1.0:
# corrPValue = 0.0
# else:
# ZValue = 0.5*log((1.0+corr)/(1.0-corr))
# ZValue = ZValue*sqrt(NNN-3)
# corrPValue = 2.0*(1.0 - reaper.normp(abs(ZValue)))
#
# NStr = "N = %d" % NNN
# strLenN = canvas.stringWidth(NStr,font=labelFont)
#
# if rank == 1:
# if corrPValue < 0.0000000000000001:
# corrStr = "Rho = %1.3f P < 1.00 E-16" % (corr)
# else:
# corrStr = "Rho = %1.3f P = %3.2E" % (corr, corrPValue)
# else:
# if corrPValue < 0.0000000000000001:
# corrStr = "r = %1.3f P < 1.00 E-16" % (corr)
# else:
# corrStr = "r = %1.3f P = %3.2E" % (corr, corrPValue)
# strLen = canvas.stringWidth(corrStr,font=labelFont)
#
# canvas.drawString(NStr,xLeftOffset,yTopOffset-10,font=labelFont,color=labelColor)
# canvas.drawString(corrStr,xLeftOffset+plotWidth-strLen,yTopOffset-10,font=labelFont,color=labelColor)
#
# return xCoord
def plotXYSVG(drawSpace, dataX, dataY, rank=0, dataLabel=[], plotColor = "black", axesColor="black", labelColor="black", symbolColor="red", XLabel=None, YLabel=None, title=None, fitcurve=None, connectdot=1, displayR=None, loadingPlot = 0, offset= (80, 20, 40, 60), zoom = 1, specialCases=[], showLabel = 1):
'displayR : correlation scatter plot, loadings : loading plot'
dataXRanked, dataYRanked = webqtlUtil.calRank(dataX, dataY, len(dataX))
# Switching Ranked and Unranked X and Y values if a Spearman Rank Correlation
if rank == 0:
dataXPrimary = dataX
dataYPrimary = dataY
dataXAlt = dataXRanked
dataYAlt = dataYRanked
else:
dataXPrimary = dataXRanked
dataYPrimary = dataYRanked
dataXAlt = dataX
dataYAlt = dataY
xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
plotWidth = drawSpace.attributes['width'] - xLeftOffset - xRightOffset
plotHeight = drawSpace.attributes['height'] - yTopOffset - yBottomOffset
if plotHeight<=0 or plotWidth<=0:
return
if len(dataXPrimary) < 1 or len(dataXPrimary) != len(dataYPrimary) or (dataLabel and len(dataXPrimary) != len(dataLabel)):
return
max_X=max(dataXPrimary)
min_X=min(dataXPrimary)
max_Y=max(dataYPrimary)
min_Y=min(dataYPrimary)
#for some reason I forgot why I need to do this
if loadingPlot:
min_X = min(-0.1,min_X)
max_X = max(0.1,max_X)
min_Y = min(-0.1,min_Y)
max_Y = max(0.1,max_Y)
xLow, xTop, stepX=detScale(min_X,max_X)
yLow, yTop, stepY=detScale(min_Y,max_Y)
xScale = plotWidth/(xTop-xLow)
yScale = plotHeight/(yTop-yLow)
#draw drawing region
r = svg.rect(xLeftOffset, yTopOffset, plotWidth, plotHeight, 'none', axesColor, 1)
drawSpace.addElement(r)
#calculate data points
data = map(lambda X, Y: (X, Y), dataXPrimary, dataYPrimary)
xCoord = map(lambda X, Y: ((X-xLow)*xScale + xLeftOffset, yTopOffset+plotHeight-(Y-yLow)*yScale), dataXPrimary, dataYPrimary)
labelFontF = "verdana"
labelFontS = 11
if loadingPlot:
xZero = -xLow*xScale+xLeftOffset
yZero = yTopOffset+plotHeight+yLow*yScale
for point in xCoord:
drawSpace.addElement(svg.line(xZero,yZero,point[0],point[1], "red", 1))
else:
if connectdot:
pass
#drawSpace.drawPolygon(xCoord,edgeColor=plotColor,closed=0)
else:
pass
for i, item in enumerate(xCoord):
if dataLabel and dataLabel[i] in specialCases:
drawSpace.addElement(svg.rect(item[0]-3, item[1]-3, 6, 6, "none", "green", 0.5))
#drawSpace.drawCross(item[0],item[1],color=pid.blue,size=5)
else:
drawSpace.addElement(svg.line(item[0],item[1]+5,item[0],item[1]-5,symbolColor,1))
drawSpace.addElement(svg.line(item[0]+5,item[1],item[0]-5,item[1],symbolColor,1))
if showLabel and dataLabel:
pass
drawSpace.addElement(svg.text(item[0], item[1]+14, dataLabel[i], labelFontS,
labelFontF, text_anchor="middle", style="stroke:blue;stroke-width:0.5;"))
#canvas.drawString(, item[0]- canvas.stringWidth(dataLabel[i],
# font=labelFont)/2, item[1]+14, font=labelFont, color=pid.blue)
#draw scale
#scaleFont=pid.Font(ttf="cour",size=14,bold=1)
x=xLow
for i in range(stepX+1):
xc=xLeftOffset+(x-xLow)*xScale
drawSpace.addElement(svg.line(xc,yTopOffset+plotHeight,xc,yTopOffset+plotHeight+5, axesColor, 1))
strX = cformat(d=x, rank=rank)
drawSpace.addElement(svg.text(xc,yTopOffset+plotHeight+20,strX,13, "courier", text_anchor="middle"))
x+= (xTop - xLow)/stepX
y=yLow
for i in range(stepY+1):
yc=yTopOffset+plotHeight-(y-yLow)*yScale
drawSpace.addElement(svg.line(xLeftOffset,yc,xLeftOffset-5,yc, axesColor, 1))
strY = cformat(d=y, rank=rank)
drawSpace.addElement(svg.text(xLeftOffset-10,yc+5,strY,13, "courier", text_anchor="end"))
y+= (yTop - yLow)/stepY
#draw label
labelFontF = "verdana"
labelFontS = 17
if XLabel:
drawSpace.addElement(svg.text(xLeftOffset+plotWidth/2.0,
yTopOffset+plotHeight+yBottomOffset-10,XLabel,
labelFontS, labelFontF, text_anchor="middle"))
if YLabel:
drawSpace.addElement(svg.text(xLeftOffset-50,
yTopOffset+plotHeight/2,YLabel,
labelFontS, labelFontF, text_anchor="middle", style="writing-mode:tb-rl", transform="rotate(270 %d %d)" % (xLeftOffset-50, yTopOffset+plotHeight/2)))
#drawSpace.drawString(YLabel, xLeftOffset-50, yTopOffset+plotHeight- (plotHeight-drawSpace.stringWidth(YLabel,font=labelFont))/2.0,
# font=labelFont,color=labelColor,angle=90)
if fitcurve:
sys.argv = [ "mod_python" ]
#from numarray import linear_algebra as la
#from numarray import ones, array, dot, swapaxes
fitYY = array(dataYPrimary)
fitXX = array([ones(len(dataXPrimary)),dataXPrimary])
AA = dot(fitXX,swapaxes(fitXX,0,1))
BB = dot(fitXX,fitYY)
bb = la.linear_least_squares(AA,BB)[0]
xc1 = xLeftOffset
yc1 = yTopOffset+plotHeight-(bb[0]+bb[1]*xLow-yLow)*yScale
if yc1 > yTopOffset+plotHeight:
yc1 = yTopOffset+plotHeight
xc1 = (yLow-bb[0])/bb[1]
xc1=(xc1-xLow)*xScale+xLeftOffset
elif yc1 < yTopOffset:
yc1 = yTopOffset
xc1 = (yTop-bb[0])/bb[1]
xc1=(xc1-xLow)*xScale+xLeftOffset
else:
pass
xc2 = xLeftOffset + plotWidth
yc2 = yTopOffset+plotHeight-(bb[0]+bb[1]*xTop-yLow)*yScale
if yc2 > yTopOffset+plotHeight:
yc2 = yTopOffset+plotHeight
xc2 = (yLow-bb[0])/bb[1]
xc2=(xc2-xLow)*xScale+xLeftOffset
elif yc2 < yTopOffset:
yc2 = yTopOffset
xc2 = (yTop-bb[0])/bb[1]
xc2=(xc2-xLow)*xScale+xLeftOffset
else:
pass
drawSpace.addElement(svg.line(xc1,yc1,xc2,yc2,"green", 1))
if displayR:
labelFontF = "trebuc"
labelFontS = 14
NNN = len(dataX)
corr = webqtlUtil.calCorrelation(dataXPrimary,dataYPrimary,NNN)[0]
if NNN < 3:
corrPValue = 1.0
else:
if abs(corr) >= 1.0:
corrPValue = 0.0
else:
ZValue = 0.5*log((1.0+corr)/(1.0-corr))
ZValue = ZValue*sqrt(NNN-3)
corrPValue = 2.0*(1.0 - reaper.normp(abs(ZValue)))
NStr = "N of Cases=%d" % NNN
if rank == 1:
corrStr = "Spearman's r=%1.3f P=%3.2E" % (corr, corrPValue)
else:
corrStr = "Pearson's r=%1.3f P=%3.2E" % (corr, corrPValue)
drawSpace.addElement(svg.text(xLeftOffset,yTopOffset-10,NStr,
labelFontS, labelFontF, text_anchor="start"))
drawSpace.addElement(svg.text(xLeftOffset+plotWidth,yTopOffset-25,corrStr,
labelFontS, labelFontF, text_anchor="end"))
"""
"""
return
# This function determines the scale of the plot
def detScaleOld(min,max):
if min>=max:
return None
elif min == -1.0 and max == 1.0:
return [-1.2,1.2,12]
else:
a=max-min
b=floor(log10(a))
c=pow(10.0,b)
if a < c*5.0:
c/=2.0
#print a,b,c
low=c*floor(min/c)
high=c*ceil(max/c)
return [low,high,round((high-low)/c)]
def detScale(min=0,max=0,bufferSpace=3):
if min>=max:
return None
elif min == -1.0 and max == 1.0:
return [-1.2,1.2,12]
else:
a=max-min
if max != 0:
max += 0.1*a
if min != 0:
if min > 0 and min < 0.1*a:
min = 0.0
else:
min -= 0.1*a
a=max-min
b=floor(log10(a))
c=pow(10.0,b)
low=c*floor(min/c)
high=c*ceil(max/c)
n = round((high-low)/c)
div = 2.0
while n < 5 or n > 15:
if n < 5:
c /= div
else:
c *= div
if div == 2.0:
div =5.0
else:
div =2.0
low=c*floor(min/c)
high=c*ceil(max/c)
n = round((high-low)/c)
return [low,high,n]
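# Hedged usage sketch (added for illustration; the helper below is an
# assumption, not part of the original module). detScale() returns
# [low, high, n]: a padded axis range plus the number of tick steps. The plot
# functions above turn that into pixel positions roughly like this:
def _axis_ticks_example(values, plotHeight=400, yTopOffset=40):
    if min(values) >= max(values):
        # detScale() returns None when there is no spread in the data
        return []
    low, high, nstep = detScale(min(values), max(values))
    scale = plotHeight / (high - low)
    ticks = []
    for i in range(int(nstep) + 1):
        value = low + i * (high - low) / nstep
        # same value-to-pixel mapping used by plotBoxPlot()/plotXYSVG()
        ticks.append((value, yTopOffset + plotHeight - (value - low) * scale))
    return ticks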
def colorSpectrumOld(n):
if n == 1:
return [pid.Color(1,0,0)]
elif n == 2:
return [pid.Color(1,0,0),pid.Color(0,0,1)]
elif n == 3:
return [pid.Color(1,0,0),pid.Color(0,1,0),pid.Color(0,0,1)]
else:
step = 2.0/(n-1)
red = 1.0
green = 0.0
blue = 0.0
colors = [pid.Color(red,green,blue)]
i = 1
greenpeak = 0
while i < n:
if red >= step:
red -= step
green += step
if green >= 1.0:
greenpeak = 1
blue += green -1.0
green = 1.0
else:
red = 0.0
if greenpeak:
green -= step
blue += step
else:
green += step
if green >= 1.0:
greenpeak = 1
blue += green -1.0
green = 2.0 -green
elif green < 0.0:
green = 0.0
else:
pass
colors.append(pid.Color(red,green,blue))
i += 1
return colors
def bluefunc(x):
return 1.0 / (1.0 + exp(-10*(x-0.6)))
def redfunc(x):
return 1.0 / (1.0 + exp(10*(x-0.5)))
def greenfunc(x):
return 1 - pow(redfunc(x+0.2),2) - bluefunc(x-0.3)
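# Added illustration (comments only, not in the original module): together the
# three sigmoids above map x in [0, 1] onto a red -> green -> blue ramp,
# approximately:
#   x = 0.0  ->  (r, g, b) ~ (0.99, 0.09, 0.00)   red
#   x = 0.5  ->  (r, g, b) ~ (0.50, 0.97, 0.27)   green
#   x = 1.0  ->  (r, g, b) ~ (0.01, 0.27, 0.98)   blue
# colorSpectrum() and colorSpectrumSVG() below sample this ramp at n points.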
def colorSpectrum(n=100):
multiple = 10
if n == 1:
return [pid.Color(1,0,0)]
elif n == 2:
return [pid.Color(1,0,0),pid.Color(0,0,1)]
elif n == 3:
return [pid.Color(1,0,0),pid.Color(0,1,0),pid.Color(0,0,1)]
N = n*multiple
out = [None]*N;
for i in range(N):
x = float(i)/N
out[i] = pid.Color(redfunc(x), greenfunc(x), bluefunc(x));
out2 = [out[0]]
step = N/float(n-1)
j = 0
for i in range(n-2):
j += step
out2.append(out[int(j)])
out2.append(out[-1])
return out2
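# Hedged usage sketch (added for illustration; the helper name below is an
# assumption, not part of the original API): one distinct colour per series
# when several traits are drawn on the same canvas.
def _colored_series_example(series_names):
    # assumes at least one series; colorSpectrum() is not meaningful for n == 0
    colors = colorSpectrum(len(series_names))
    # pair each series with its pid.Color, e.g. for drawing a legend
    return list(zip(series_names, colors))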
def colorSpectrumSVG(n=100):
multiple = 10
if n == 1:
return ["rgb(255,0,0)"]
elif n == 2:
return ["rgb(255,0,0)","rgb(0,0,255)"]
elif n == 3:
return ["rgb(255,0,0)","rgb(0,255,0)","rgb(0,0,255)"]
N = n*multiple
out = [None]*N;
for i in range(N):
x = float(i)/N
out[i] = "rgb(%d, %d, %d)" % (redfunc(x)*255, greenfunc(x)*255, bluefunc(x)*255);
out2 = [out[0]]
step = N/float(n-1)
j = 0
for i in range(n-2):
j += step
out2.append(out[int(j)])
out2.append(out[-1])
return out2
def BWSpectrum(n=100):
multiple = 10
if n == 1:
return [pid.Color(0,0,0)]
elif n == 2:
return [pid.Color(0,0,0),pid.Color(1,1,1)]
elif n == 3:
return [pid.Color(0,0,0),pid.Color(0.5,0.5,0.5),pid.Color(1,1,1)]
step = 1.0/n
x = 0.0
out = []
for i in range(n):
out.append(pid.Color(x,x,x));
x += step
return out
def _test():
import doctest
doctest.testmod()
if __name__=="__main__":
_test()
|
agpl-3.0
|
mikeireland/pynrm
|
go.py
|
1
|
3044
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 2 13:49:11 2014
@author: mireland
A script for testing... Change this to try out your own analysis.
"""
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
from azimuthalAverage import *
# This includes an AO Instrument called "aoinst"
import pypoise
import nirc2
import glob
import pdb
#Create a pypoise instance with a nirc2 AO instrument
pp = pypoise.PYPOISE(nirc2.NIRC2())
plt.ion()
#Reduction Directory - Lp full pupil
pp.aoinst.rdir = '/Users/mireland/tel/nirc2/redux/generic2015/'
pp.aoinst.cdir = '/Users/mireland/tel/nirc2/redux/TauPAH15/'
#Data directory
pp.aoinst.ddir = '/Users/mireland/data/nirc2/151128/'
pp.aoinst.read_summary_csv()
if(False):
pp.process_block(fstart='n1251.fits',fend='n1293.fits', dither=True)
if(False):
pp.process_block(fstart='n1493.fits',fend='n1517.fits', dither=True)
targname = 'AB Aur'
targname = 'SU Aur'
targname = 'RY Tau'
if(True):
#The argument "target" is just there to determine which object is the target.
summary_files = pp.poise_process(target=targname, use_powerspect=False)
print(summary_files)
if(True):
# summary_files = glob.glob('*LkCa*poise_cube*.fits')
implane_file = pp.aoinst.cdir + targname + '_implane.fits'
pxscale = 5.0
#pdb.set_trace()
if (True):
kp_implane = pp.kp_to_implane(summary_files=summary_files,
out_file=implane_file, sz=141, pxscale=pxscale, use_powerspect=False)
if (True):
#Automatic from here...
pgrid, crat, crat_sig, chi2, best_rchi2 = pp.implane_fit_binary(implane_file, maxrad=250)
print("Grid Fit: ", pgrid)
pgrid = np.array(pgrid)
if (pgrid[2] > 0.5):
print("Contrast too high to use kerphase for fitting (i.e. near-equal binary).")
else:
p,errs,cov = pp.kp_binary_fit(summary_files,pgrid)
fitfile = open(targname + '_binaryfit.txt','w')
fitfile.write('Separation (mas) & Position angle (degs) & Contrast \\\\\n')
fitfile.write('{0:5.2f} $\pm$ {1:5.2f} & {2:5.2f} $\pm$ {3:5.2f} & {4:6.4f} $\pm$ {5:6.4f} \\\\ \n'.format(\
p[0],errs[0], p[1],errs[1], p[2],errs[2]))
fitfile.write('Contrast (mags) & Separation (mas) & Position angle (degs) \\\\\n')
fit_crat = -2.5*np.log10(p[2])
fit_crat_sig = 2.5/np.log(10)*errs[2]/p[2]
fitfile.write('{0:5.2f} $\pm$ {1:5.2f} & {2:5.2f} $\pm$ {3:5.2f} & {4:5.3f} $\pm$ {5:5.3f} \\\\ \n'.format(\
fit_crat, fit_crat_sig, p[0],errs[0], p[1],errs[1] ))
fitfile.close()
a = azimuthalAverage(crat_sig*np.sqrt(best_rchi2), returnradii=True,binsize=1)
sep_null = a[0]*pxscale
contrast_null = -2.5*np.log10(5*a[1])
plt.clf()
plt.plot(sep_null, contrast_null)
plt.title(targname)
plt.xlabel("Separation (milli-arcsec)")
plt.ylabel("5-sigma contrast (mags)")
sep_out = np.arange(20,301,10)
contrast_out = np.interp(sep_out, sep_null, contrast_null)
for i in range(len(sep_out)):
print('{0:4d} {1:5.1f}'.format(int(sep_out[i]), contrast_out[i]))
plt.axis((0,300,2,7))
plt.savefig(pp.aoinst.cdir + targname + '_contrast_curve.png')
|
mit
|
Noviat/account-financial-reporting-V3-intrastat
|
account_journal_report_xls/wizard/print_journal_wizard.py
|
27
|
8349
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import orm, fields
from openerp.addons.account.wizard.account_report_common_journal \
import account_common_journal_report
import logging
_logger = logging.getLogger(__name__)
class account_print_journal_xls(orm.TransientModel):
_inherit = 'account.print.journal'
_name = 'account.print.journal.xls'
_description = 'Print/Export Journal'
_columns = {
'journal_ids': fields.many2many(
'account.journal',
'account_print_journal_xls_journal_rel',
'journal_xls_id',
'journal_id',
string='Journals',
required=True),
'group_entries': fields.boolean(
'Group Entries',
help="Group entries with same General Account & Tax Code."),
}
_defaults = {
'group_entries': True,
}
def fields_get(self, cr, uid, fields=None, context=None):
res = super(account_print_journal_xls, self).fields_get(
cr, uid, fields, context)
if context.get('print_by') == 'fiscalyear':
if 'fiscalyear_id' in res:
res['fiscalyear_id']['required'] = True
if 'period_from' in res:
res['period_from']['readonly'] = True
if 'period_to' in res:
res['period_to']['readonly'] = True
else:
if 'period_from' in res:
res['period_from']['required'] = True
if 'period_to' in res:
res['period_to']['required'] = True
return res
def fy_period_ids(self, cr, uid, fiscalyear_id):
""" returns all periods from a fiscalyear sorted by date """
fy_period_ids = []
cr.execute('''
SELECT id, coalesce(special, False) AS special
FROM account_period
WHERE fiscalyear_id=%s ORDER BY date_start, special DESC''',
(fiscalyear_id,))
res = cr.fetchall()
if res:
fy_period_ids = [x[0] for x in res]
return fy_period_ids
def onchange_fiscalyear_id(self, cr, uid, ids, fiscalyear_id=False,
context=None):
res = {'value': {}}
if context.get('print_by') == 'fiscalyear':
# get period_from/to with opening/close periods
fy_period_ids = self.fy_period_ids(cr, uid, fiscalyear_id)
if fy_period_ids:
res['value']['period_from'] = fy_period_ids[0]
res['value']['period_to'] = fy_period_ids[-1]
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
""" skip account.common.journal.report,fields_view_get
(adds domain filter on journal type) """
return super(account_common_journal_report, self).\
fields_view_get(cr, uid, view_id, view_type, context, toolbar,
submenu)
def xls_export(self, cr, uid, ids, context=None):
return self.print_report(cr, uid, ids, context=context)
def print_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
print_by = context.get('print_by')
wiz_form = self.browse(cr, uid, ids)[0]
fiscalyear_id = wiz_form.fiscalyear_id.id
company_id = wiz_form.company_id.id
if print_by == 'fiscalyear':
wiz_period_ids = self.fy_period_ids(cr, uid, fiscalyear_id)
else:
period_from = wiz_form.period_from
period_to = wiz_form.period_to
cr.execute("""
SELECT id, coalesce(special, False) AS special
FROM account_period ap
WHERE ap.date_start>=%s AND ap.date_stop<=%s AND company_id=%s
ORDER BY date_start, special DESC""",
(period_from.date_start,
period_to.date_stop,
company_id))
wiz_period_ids = map(lambda x: x[0], cr.fetchall())
wiz_journal_ids = [j.id for j in wiz_form.journal_ids]
# sort journals
cr.execute('SELECT id FROM account_journal '
'WHERE id IN %s ORDER BY type DESC',
(tuple(wiz_journal_ids),))
wiz_journal_ids = map(lambda x: x[0], cr.fetchall())
datas = {
'model': 'account.journal',
'print_by': print_by,
'sort_selection': wiz_form.sort_selection,
'target_move': wiz_form.target_move,
'display_currency': wiz_form.amount_currency,
'group_entries': wiz_form.group_entries,
}
if wiz_form.target_move == 'posted':
move_states = ['posted']
else:
move_states = ['draft', 'posted']
if print_by == 'fiscalyear':
journal_fy_ids = []
for journal_id in wiz_journal_ids:
aml_ids = move_obj.search(cr, uid,
[('journal_id', '=', journal_id),
('period_id', 'in', wiz_period_ids),
('state', 'in', move_states)],
limit=1)
if aml_ids:
journal_fy_ids.append((journal_id, fiscalyear_id))
if not journal_fy_ids:
raise orm.except_orm(
_('No Data Available'),
_('No records found for your selection!'))
datas.update({
'ids': [x[0] for x in journal_fy_ids],
'journal_fy_ids': journal_fy_ids,
})
else:
# perform account.move.line query instead of
# 'account.journal.period' since this table is not always reliable
journal_period_ids = []
for journal_id in wiz_journal_ids:
period_ids = []
for period_id in wiz_period_ids:
aml_ids = move_obj.search(cr, uid,
[('journal_id', '=', journal_id),
('period_id', '=', period_id),
('state', 'in', move_states)],
limit=1)
if aml_ids:
period_ids.append(period_id)
if period_ids:
journal_period_ids.append((journal_id, period_ids))
if not journal_period_ids:
raise orm.except_orm(
_('No Data Available'),
_('No records found for your selection!'))
datas.update({
'ids': [x[0] for x in journal_period_ids],
'journal_period_ids': journal_period_ids,
})
if context.get('xls_export'):
return {'type': 'ir.actions.report.xml',
'report_name': 'nov.account.journal.xls',
'datas': datas}
else:
return {
'type': 'ir.actions.report.xml',
'report_name': 'nov.account.journal.print',
'datas': datas}
|
agpl-3.0
|
CERT-Solucom/certitude
|
components/scanner/flatevaluators/services.py
|
2
|
1432
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
CERTitude: the seeker of IOC
Copyright (c) 2016 CERT-W
Contact: [email protected]
Contributors: @iansus, @nervous, @fschwebel
CERTitude is under licence GPL-2.0:
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
import template
class Evaluator(template.EvaluatorInterface):
evalList = ['descriptiveName', 'mode', 'path', 'pathmd5sum', 'status', 'name']
def __init__(self, logger, ioc, remoteCommand, keepFiles, confidential, dirname):
template.EvaluatorInterface.__init__(self, logger, ioc, remoteCommand, keepFiles, confidential, dirname)
self.setEvaluatorParams(evalList=Evaluator.evalList, name='services', command='collector getservices')
|
gpl-2.0
|
akash1808/nova_test_latest
|
nova/tests/unit/virt/libvirt/test_host.py
|
43
|
42406
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
import eventlet
from eventlet import greenthread
import mock
from nova.compute import arch
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt import event
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
class FakeVirtDomain(object):
def __init__(self, id=-1, name=None):
self._id = id
self._name = name
self._uuid = str(uuid.uuid4())
def name(self):
return self._name
def ID(self):
return self._id
def UUIDString(self):
return self._uuid
class HostTestCase(test.NoDBTestCase):
def setUp(self):
super(HostTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.host = host.Host("qemu:///system")
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback(self, mock_close):
self.close_callback = None
def set_close_callback(cb, opaque):
self.close_callback = cb
mock_close.side_effect = set_close_callback
# verify that the driver registers for the close callback
self.host.get_connection()
self.assertTrue(self.close_callback)
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback_bad_signature(self, mock_close):
'''Validates that a connection to libvirt exists,
even when registerCloseCallback method has a different
number of arguments in the libvirt python library.
'''
mock_close.side_effect = TypeError('dd')
connection = self.host.get_connection()
self.assertTrue(connection)
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback_not_defined(self, mock_close):
'''Validates that a connection to libvirt exists,
even when registerCloseCallback method missing from
the libvirt python library.
'''
mock_close.side_effect = AttributeError('dd')
connection = self.host.get_connection()
self.assertTrue(connection)
@mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
def test_broken_connection(self, mock_ver):
for (error, domain) in (
(fakelibvirt.VIR_ERR_SYSTEM_ERROR,
fakelibvirt.VIR_FROM_REMOTE),
(fakelibvirt.VIR_ERR_SYSTEM_ERROR,
fakelibvirt.VIR_FROM_RPC),
(fakelibvirt.VIR_ERR_INTERNAL_ERROR,
fakelibvirt.VIR_FROM_RPC)):
conn = self.host._connect("qemu:///system", False)
mock_ver.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Connection broken",
error_code=error,
error_domain=domain)
self.assertFalse(self.host._test_connection(conn))
@mock.patch.object(host, 'LOG')
def test_connect_auth_cb_exception(self, log_mock):
creds = dict(authname='nova', password='verybadpass')
self.assertRaises(exception.NovaException,
self.host._connect_auth_cb, creds, False)
self.assertEqual(0, len(log_mock.method_calls),
'LOG should not be used in _connect_auth_cb.')
@mock.patch.object(greenthread, 'spawn_after')
def test_event_dispatch(self, mock_spawn_after):
# Validate that the libvirt self-pipe for forwarding
# events between threads is working sanely
def handler(event):
got_events.append(event)
hostimpl = host.Host("qemu:///system",
lifecycle_event_handler=handler)
got_events = []
hostimpl._init_events_pipe()
event1 = event.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
event.EVENT_LIFECYCLE_STARTED)
event2 = event.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
event.EVENT_LIFECYCLE_PAUSED)
hostimpl._queue_event(event1)
hostimpl._queue_event(event2)
hostimpl._dispatch_events()
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = event.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
event.EVENT_LIFECYCLE_RESUMED)
event4 = event.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
event.EVENT_LIFECYCLE_STOPPED)
hostimpl._queue_event(event3)
hostimpl._queue_event(event4)
hostimpl._dispatch_events()
want_events = [event1, event2, event3]
self.assertEqual(want_events, got_events)
# STOPPED is delayed so it's handled separately
mock_spawn_after.assert_called_once_with(
hostimpl._lifecycle_delay, hostimpl._event_emit, event4)
def test_event_lifecycle(self):
got_events = []
# Validate that libvirt events are correctly translated
# to Nova events
def spawn_after(seconds, func, *args, **kwargs):
got_events.append(args[0])
return mock.Mock(spec=greenthread.GreenThread)
greenthread.spawn_after = mock.Mock(side_effect=spawn_after)
hostimpl = host.Host("qemu:///system",
lifecycle_event_handler=lambda e: None)
conn = hostimpl.get_connection()
hostimpl._init_events_pipe()
fake_dom_xml = """
<domain type='kvm'>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
dom = fakelibvirt.Domain(conn,
fake_dom_xml,
False)
hostimpl._event_lifecycle_callback(
conn, dom, fakelibvirt.VIR_DOMAIN_EVENT_STOPPED, 0, hostimpl)
hostimpl._dispatch_events()
self.assertEqual(len(got_events), 1)
self.assertIsInstance(got_events[0], event.LifecycleEvent)
self.assertEqual(got_events[0].uuid,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
self.assertEqual(got_events[0].transition,
event.EVENT_LIFECYCLE_STOPPED)
def test_event_emit_delayed_call_delayed(self):
ev = event.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
event.EVENT_LIFECYCLE_STOPPED)
for uri in ("qemu:///system", "xen:///"):
spawn_after_mock = mock.Mock()
greenthread.spawn_after = spawn_after_mock
hostimpl = host.Host(uri,
lifecycle_event_handler=lambda e: None)
hostimpl._event_emit_delayed(ev)
spawn_after_mock.assert_called_once_with(
15, hostimpl._event_emit, ev)
@mock.patch.object(greenthread, 'spawn_after')
def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock):
hostimpl = host.Host("xen:///",
lifecycle_event_handler=lambda e: None)
uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
gt_mock = mock.Mock()
hostimpl._events_delayed[uuid] = gt_mock
ev = event.LifecycleEvent(
uuid, event.EVENT_LIFECYCLE_STOPPED)
hostimpl._event_emit_delayed(ev)
gt_mock.cancel.assert_called_once_with()
self.assertTrue(spawn_after_mock.called)
def test_event_delayed_cleanup(self):
hostimpl = host.Host("xen:///",
lifecycle_event_handler=lambda e: None)
uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
ev = event.LifecycleEvent(
uuid, event.EVENT_LIFECYCLE_STARTED)
gt_mock = mock.Mock()
hostimpl._events_delayed[uuid] = gt_mock
hostimpl._event_emit_delayed(ev)
gt_mock.cancel.assert_called_once_with()
self.assertNotIn(uuid, hostimpl._events_delayed.keys())
@mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny")
@mock.patch.object(host.Host, "_connect")
def test_get_connection_serial(self, mock_conn, mock_event):
def get_conn_currency(host):
host.get_connection().getLibVersion()
def connect_with_block(*a, **k):
# enough to allow another connect to run
eventlet.sleep(0)
self.connect_calls += 1
return fakelibvirt.openAuth("qemu:///system",
[[], lambda: 1, None], 0)
def fake_register(*a, **k):
self.register_calls += 1
self.connect_calls = 0
self.register_calls = 0
mock_conn.side_effect = connect_with_block
mock_event.side_effect = fake_register
# call serially
get_conn_currency(self.host)
get_conn_currency(self.host)
self.assertEqual(self.connect_calls, 1)
self.assertEqual(self.register_calls, 1)
@mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny")
@mock.patch.object(host.Host, "_connect")
def test_get_connection_concurrency(self, mock_conn, mock_event):
def get_conn_currency(host):
host.get_connection().getLibVersion()
def connect_with_block(*a, **k):
# enough to allow another connect to run
eventlet.sleep(0)
self.connect_calls += 1
return fakelibvirt.openAuth("qemu:///system",
[[], lambda: 1, None], 0)
def fake_register(*a, **k):
self.register_calls += 1
self.connect_calls = 0
self.register_calls = 0
mock_conn.side_effect = connect_with_block
mock_event.side_effect = fake_register
# call concurrently
thr1 = eventlet.spawn(get_conn_currency, self.host)
thr2 = eventlet.spawn(get_conn_currency, self.host)
# let threads run
eventlet.sleep(0)
thr1.wait()
thr2.wait()
self.assertEqual(self.connect_calls, 1)
self.assertEqual(self.register_calls, 1)
@mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
@mock.patch.object(fakelibvirt.virConnect, "getVersion")
@mock.patch.object(fakelibvirt.virConnect, "getType")
def test_has_min_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver):
fake_lv_ver.return_value = 1002003
fake_hv_ver.return_value = 4005006
fake_hv_type.return_value = 'xyz'
lv_ver = (1, 2, 3)
hv_ver = (4, 5, 6)
hv_type = 'xyz'
self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, hv_type))
self.assertFalse(self.host.has_min_version(lv_ver, hv_ver, 'abc'))
self.assertFalse(self.host.has_min_version(lv_ver, (4, 5, 7), hv_type))
self.assertFalse(self.host.has_min_version((1, 3, 3), hv_ver, hv_type))
self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, None))
self.assertTrue(self.host.has_min_version(lv_ver, None, hv_type))
self.assertTrue(self.host.has_min_version(None, hv_ver, hv_type))
@mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
@mock.patch.object(fakelibvirt.virConnect, "getVersion")
@mock.patch.object(fakelibvirt.virConnect, "getType")
def test_has_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver):
fake_lv_ver.return_value = 1002003
fake_hv_ver.return_value = 4005006
fake_hv_type.return_value = 'xyz'
lv_ver = (1, 2, 3)
hv_ver = (4, 5, 6)
hv_type = 'xyz'
self.assertTrue(self.host.has_version(lv_ver, hv_ver, hv_type))
for lv_ver_ in [(1, 2, 2), (1, 2, 4)]:
self.assertFalse(self.host.has_version(lv_ver_, hv_ver, hv_type))
for hv_ver_ in [(4, 4, 6), (4, 6, 6)]:
self.assertFalse(self.host.has_version(lv_ver, hv_ver_, hv_type))
self.assertFalse(self.host.has_version(lv_ver, hv_ver, 'abc'))
self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, None))
self.assertTrue(self.host.has_min_version(lv_ver, None, hv_type))
self.assertTrue(self.host.has_min_version(None, hv_ver, hv_type))
@mock.patch.object(fakelibvirt.virConnect, "lookupByID")
def test_get_domain_by_id(self, fake_lookup):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_lookup.return_value = dom
self.assertEqual(dom, self.host._get_domain_by_id(7))
fake_lookup.assert_called_once_with(7)
@mock.patch.object(fakelibvirt.virConnect, "lookupByID")
def test_get_domain_by_id_raises(self, fake_lookup):
fake_lookup.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'Domain not found: no domain with matching id 7',
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN,
error_domain=fakelibvirt.VIR_FROM_QEMU)
self.assertRaises(exception.InstanceNotFound,
self.host._get_domain_by_id,
7)
fake_lookup.assert_called_once_with(7)
@mock.patch.object(fakelibvirt.virConnect, "lookupByName")
def test_get_domain_by_name(self, fake_lookup):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_lookup.return_value = dom
self.assertEqual(dom, self.host._get_domain_by_name("wibble"))
fake_lookup.assert_called_once_with("wibble")
@mock.patch.object(fakelibvirt.virConnect, "lookupByName")
def test_get_domain_by_name_raises(self, fake_lookup):
fake_lookup.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'Domain not found: no domain with matching name',
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN,
error_domain=fakelibvirt.VIR_FROM_QEMU)
self.assertRaises(exception.InstanceNotFound,
self.host._get_domain_by_name,
"wibble")
fake_lookup.assert_called_once_with("wibble")
@mock.patch.object(host.Host, "_get_domain_by_name")
def test_get_domain(self, fake_get_domain):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_get_domain.return_value = dom
instance = objects.Instance(id="124")
self.assertEqual(dom, self.host.get_domain(instance))
fake_get_domain.assert_called_once_with("instance-0000007c")
@mock.patch.object(host.Host, "_get_domain_by_name")
def test_get_guest(self, fake_get_domain):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_get_domain.return_value = dom
instance = objects.Instance(id="124")
guest = self.host.get_guest(instance)
self.assertEqual(dom, guest._domain)
self.assertIsInstance(guest, libvirt_guest.Guest)
fake_get_domain.assert_called_once_with("instance-0000007c")
@mock.patch.object(fakelibvirt.Connection, "listAllDomains")
def test_list_instance_domains_fast(self, mock_list_all):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
def fake_list_all(flags):
vms = []
if flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE:
vms.extend([vm1, vm2])
if flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE:
vms.extend([vm3, vm4])
return vms
mock_list_all.side_effect = fake_list_all
doms = self.host._list_instance_domains_fast()
mock_list_all.assert_called_once_with(
fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
mock_list_all.reset_mock()
self.assertEqual(len(doms), 2)
self.assertEqual(doms[0].name(), vm1.name())
self.assertEqual(doms[1].name(), vm2.name())
doms = self.host._list_instance_domains_fast(only_running=False)
mock_list_all.assert_called_once_with(
fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE |
fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE)
self.assertEqual(len(doms), 4)
self.assertEqual(doms[0].name(), vm1.name())
self.assertEqual(doms[1].name(), vm2.name())
self.assertEqual(doms[2].name(), vm3.name())
self.assertEqual(doms[3].name(), vm4.name())
@mock.patch.object(fakelibvirt.Connection, "numOfDomains")
@mock.patch.object(fakelibvirt.Connection, "listDefinedDomains")
@mock.patch.object(fakelibvirt.Connection, "listDomainsID")
@mock.patch.object(host.Host, "_get_domain_by_name")
@mock.patch.object(host.Host, "_get_domain_by_id")
def test_list_instance_domains_slow(self,
mock_get_id, mock_get_name,
mock_list_ids, mock_list_names,
mock_num_ids):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
vms = [vm1, vm2, vm3, vm4]
def fake_get_domain_by_id(id):
for vm in vms:
if vm.ID() == id:
return vm
raise exception.InstanceNotFound(instance_id=id)
def fake_get_domain_by_name(name):
for vm in vms:
if vm.name() == name:
return vm
raise exception.InstanceNotFound(instance_id=name)
def fake_list_ids():
# Include one ID that no longer exists
return [vm1.ID(), vm2.ID(), 666]
def fake_list_names():
# Include one name that no longer exists and
# one dup from running list to show race in
# transition from inactive -> running
return [vm1.name(), vm3.name(), vm4.name(), "fishfood"]
mock_get_id.side_effect = fake_get_domain_by_id
mock_get_name.side_effect = fake_get_domain_by_name
mock_list_ids.side_effect = fake_list_ids
mock_list_names.side_effect = fake_list_names
mock_num_ids.return_value = 2
doms = self.host._list_instance_domains_slow()
mock_list_ids.assert_called_once_with()
mock_num_ids.assert_called_once_with()
self.assertFalse(mock_list_names.called)
mock_list_ids.reset_mock()
mock_list_names.reset_mock()
mock_num_ids.reset_mock()
self.assertEqual(len(doms), 2)
self.assertEqual(doms[0].name(), vm1.name())
self.assertEqual(doms[1].name(), vm2.name())
doms = self.host._list_instance_domains_slow(only_running=False)
mock_list_ids.assert_called_once_with()
mock_num_ids.assert_called_once_with()
mock_list_names.assert_called_once_with()
self.assertEqual(len(doms), 4)
self.assertEqual(doms[0].name(), vm1.name())
self.assertEqual(doms[1].name(), vm2.name())
self.assertEqual(doms[2].name(), vm3.name())
self.assertEqual(doms[3].name(), vm4.name())
@mock.patch.object(fakelibvirt.Connection, "listAllDomains")
@mock.patch.object(fakelibvirt.Connection, "numOfDomains")
@mock.patch.object(fakelibvirt.Connection, "listDomainsID")
@mock.patch.object(host.Host, "_get_domain_by_id")
def test_list_instance_domains_fallback(self,
mock_get_id, mock_list_ids,
mock_num_ids, mock_list_all):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vms = [vm1, vm2]
def fake_get_domain_by_id(id):
for vm in vms:
if vm.ID() == id:
return vm
raise exception.InstanceNotFound(instance_id=id)
def fake_list_doms():
return [vm1.ID(), vm2.ID()]
def fake_list_all(flags):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"API is not supported",
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
raise ex
mock_get_id.side_effect = fake_get_domain_by_id
mock_list_ids.side_effect = fake_list_doms
mock_num_ids.return_value = 2
mock_list_all.side_effect = fake_list_all
doms = self.host.list_instance_domains()
mock_list_all.assert_called_once_with(
fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
mock_list_ids.assert_called_once_with()
mock_num_ids.assert_called_once_with()
self.assertEqual(len(doms), 2)
self.assertEqual(doms[0].ID(), vm1.ID())
self.assertEqual(doms[1].ID(), vm2.ID())
@mock.patch.object(host.Host, "_list_instance_domains_fast")
def test_list_instance_domains_filtering(self, mock_list):
vm0 = FakeVirtDomain(id=0, name="Domain-0") # Xen dom-0
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm0, vm1, vm2]
doms = self.host.list_instance_domains()
self.assertEqual(len(doms), 2)
self.assertEqual(doms[0].name(), vm1.name())
self.assertEqual(doms[1].name(), vm2.name())
mock_list.assert_called_with(True)
mock_list.return_value = [vm0, vm1, vm2, vm3, vm4]
doms = self.host.list_instance_domains(only_running=False)
self.assertEqual(len(doms), 4)
self.assertEqual(doms[0].name(), vm1.name())
self.assertEqual(doms[1].name(), vm2.name())
self.assertEqual(doms[2].name(), vm3.name())
self.assertEqual(doms[3].name(), vm4.name())
mock_list.assert_called_with(False)
mock_list.return_value = [vm0, vm1, vm2]
doms = self.host.list_instance_domains(only_guests=False)
self.assertEqual(len(doms), 3)
self.assertEqual(doms[0].name(), vm0.name())
self.assertEqual(doms[1].name(), vm1.name())
self.assertEqual(doms[2].name(), vm2.name())
mock_list.assert_called_with(True)
def test_cpu_features_bug_1217630(self):
self.host.get_connection()
# Test old version of libvirt, it shouldn't see the `aes' feature
with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
caps = self.host.get_capabilities()
self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
        # Clean up the capabilities cache first
self.host._caps = None
# Test new version of libvirt, should find the `aes' feature
with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
caps = self.host.get_capabilities()
self.assertIn('aes', [x.name for x in caps.host.cpu.features])
def test_cpu_features_are_not_duplicated(self):
self.host.get_connection()
        # Test old version of libvirt. The 'xtpr' feature should appear only once
with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
caps = self.host.get_capabilities()
cnt = [x.name for x in caps.host.cpu.features].count('xtpr')
self.assertEqual(1, cnt)
        # Clean up the capabilities cache first
self.host._caps = None
        # Test new version of libvirt. The 'xtpr' feature should still appear only once
with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
caps = self.host.get_capabilities()
cnt = [x.name for x in caps.host.cpu.features].count('xtpr')
self.assertEqual(1, cnt)
def test_baseline_cpu_not_supported(self):
# Handle just the NO_SUPPORT error
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virConnectBaselineCPU',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
side_effect=not_supported_exc):
caps = self.host.get_capabilities()
self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
# Clear cached result so we can test again...
self.host._caps = None
# Other errors should not be caught
other_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'other exc',
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
side_effect=other_exc):
self.assertRaises(fakelibvirt.libvirtError,
self.host.get_capabilities)
def test_lxc_get_host_capabilities_failed(self):
with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
return_value=-1):
caps = self.host.get_capabilities()
self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
@mock.patch.object(fakelibvirt.virConnect, "getHostname")
def test_get_hostname_caching(self, mock_hostname):
mock_hostname.return_value = "foo"
self.assertEqual('foo', self.host.get_hostname())
mock_hostname.assert_called_with()
mock_hostname.reset_mock()
mock_hostname.return_value = "bar"
self.assertEqual('foo', self.host.get_hostname())
mock_hostname.assert_called_with()
@mock.patch.object(fakelibvirt.virConnect, "getType")
def test_get_driver_type(self, mock_type):
mock_type.return_value = "qemu"
self.assertEqual("qemu", self.host.get_driver_type())
mock_type.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "getVersion")
def test_get_version(self, mock_version):
mock_version.return_value = 1005001
self.assertEqual(1005001, self.host.get_version())
mock_version.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "secretLookupByUsage")
def test_find_secret(self, mock_sec):
"""finding secrets with various usage_type."""
expected = [
mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'rbdvol'),
mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'cephvol'),
mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_ISCSI, 'iscsivol'),
mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_VOLUME, 'vol')]
self.host.find_secret('rbd', 'rbdvol')
self.host.find_secret('ceph', 'cephvol')
self.host.find_secret('iscsi', 'iscsivol')
self.host.find_secret('volume', 'vol')
self.assertEqual(expected, mock_sec.mock_calls)
self.assertRaises(exception.NovaException,
self.host.find_secret, "foo", "foovol")
mock_sec.side_effect = fakelibvirt.libvirtError("")
mock_sec.side_effect.err = (66, )
self.assertIsNone(self.host.find_secret('rbd', 'rbdvol'))
@mock.patch.object(fakelibvirt.virConnect, "secretDefineXML")
def test_create_secret(self, mock_sec):
"""creating secrets with various usage_type."""
self.host.create_secret('rbd', 'rbdvol')
self.host.create_secret('ceph', 'cephvol')
self.host.create_secret('iscsi', 'iscsivol')
self.host.create_secret('volume', 'vol')
self.assertRaises(exception.NovaException,
self.host.create_secret, "foo", "foovol")
secret = mock.MagicMock()
mock_sec.return_value = secret
self.host.create_secret('iscsi', 'iscsivol', password="foo")
secret.setValue.assert_called_once_with("foo")
@mock.patch('nova.virt.libvirt.host.Host.find_secret')
def test_delete_secret(self, mock_find_secret):
"""deleting secret."""
secret = mock.MagicMock()
mock_find_secret.return_value = secret
expected = [mock.call('rbd', 'rbdvol'),
mock.call().undefine()]
self.host.delete_secret('rbd', 'rbdvol')
self.assertEqual(expected, mock_find_secret.mock_calls)
mock_find_secret.return_value = None
self.host.delete_secret("rbd", "rbdvol")
def test_get_cpu_count(self):
with mock.patch.object(host.Host, "get_connection") as mock_conn:
mock_conn().getInfo.return_value = ['zero', 'one', 'two']
self.assertEqual('two', self.host.get_cpu_count())
def test_get_memory_total(self):
with mock.patch.object(host.Host, "get_connection") as mock_conn:
mock_conn().getInfo.return_value = ['zero', 'one', 'two']
self.assertEqual('one', self.host.get_memory_mb_total())
def test_get_memory_used(self):
m = mock.mock_open(read_data="""
MemTotal: 16194180 kB
MemFree: 233092 kB
MemAvailable: 8892356 kB
Buffers: 567708 kB
Cached: 8362404 kB
SwapCached: 0 kB
Active: 8381604 kB
""")
with contextlib.nested(
mock.patch("__builtin__.open", m, create=True),
mock.patch.object(host.Host,
"get_connection"),
mock.patch('sys.platform', 'linux2'),
) as (mock_file, mock_conn, mock_platform):
mock_conn().getInfo.return_value = [
arch.X86_64, 15814, 8, 1208, 1, 1, 4, 2]
self.assertEqual(6866, self.host.get_memory_mb_used())
def test_get_memory_used_xen(self):
self.flags(virt_type='xen', group='libvirt')
class DiagFakeDomain(object):
def __init__(self, id, memmb):
self.id = id
self.memmb = memmb
def info(self):
return [0, 0, self.memmb * 1024]
def ID(self):
return self.id
def name(self):
return "instance000001"
def UUIDString(self):
return str(uuid.uuid4())
m = mock.mock_open(read_data="""
MemTotal: 16194180 kB
MemFree: 233092 kB
MemAvailable: 8892356 kB
Buffers: 567708 kB
Cached: 8362404 kB
SwapCached: 0 kB
Active: 8381604 kB
""")
with contextlib.nested(
mock.patch("__builtin__.open", m, create=True),
mock.patch.object(host.Host,
"list_instance_domains"),
mock.patch.object(libvirt_driver.LibvirtDriver,
"_conn"),
mock.patch('sys.platform', 'linux2'),
) as (mock_file, mock_list, mock_conn, mock_platform):
mock_list.return_value = [
DiagFakeDomain(0, 15814),
DiagFakeDomain(1, 750),
DiagFakeDomain(2, 1042)]
mock_conn.getInfo.return_value = [
arch.X86_64, 15814, 8, 1208, 1, 1, 4, 2]
self.assertEqual(8657, self.host.get_memory_mb_used())
mock_list.assert_called_with(only_guests=False)
def test_get_cpu_stats(self):
stats = self.host.get_cpu_stats()
self.assertEqual(
{'kernel': 5664160000000,
'idle': 1592705190000000,
'frequency': 800,
'user': 26728850000000,
'iowait': 6121490000000},
stats)
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
xml = "<x><name>foo</name></x>"
self.host.write_instance_config(xml)
mock_defineXML.assert_called_once_with(xml)
@mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
def test_device_lookup_by_name(self, mock_nodeDeviceLookupByName):
self.host.device_lookup_by_name("foo")
mock_nodeDeviceLookupByName.assert_called_once_with("foo")
@mock.patch.object(fakelibvirt.virConnect, "listDevices")
def test_list_pci_devices(self, mock_listDevices):
self.host.list_pci_devices(8)
mock_listDevices.assert_called_once_with('pci', 8)
@mock.patch.object(fakelibvirt.virConnect, "compareCPU")
def test_compare_cpu(self, mock_compareCPU):
self.host.compare_cpu("cpuxml")
mock_compareCPU.assert_called_once_with("cpuxml", 0)
class DomainJobInfoTestCase(test.NoDBTestCase):
def setUp(self):
super(DomainJobInfoTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.conn = fakelibvirt.openAuth("qemu:///system",
[[], lambda: True])
xml = ("<domain type='kvm'>"
" <name>instance-0000000a</name>"
"</domain>")
self.dom = self.conn.createXML(xml, 0)
host.DomainJobInfo._have_job_stats = True
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats(self, mock_stats, mock_info):
mock_stats.return_value = {
"type": fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
"memory_total": 75,
"memory_processed": 50,
"memory_remaining": 33,
"some_new_libvirt_stat_we_dont_know_about": 83
}
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(0, info.disk_total)
self.assertEqual(0, info.disk_processed)
self.assertEqual(0, info.disk_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_no_support(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.return_value = [
fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(100, info.time_elapsed)
self.assertEqual(99, info.time_remaining)
self.assertEqual(10, info.data_total)
self.assertEqual(11, info.data_processed)
self.assertEqual(12, info.data_remaining)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(1, info.disk_total)
self.assertEqual(2, info.disk_processed)
self.assertEqual(3, info.disk_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_attr_error(self, mock_stats, mock_info):
mock_stats.side_effect = AttributeError("No such API")
mock_info.return_value = [
fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(100, info.time_elapsed)
self.assertEqual(99, info.time_remaining)
self.assertEqual(10, info.data_total)
self.assertEqual(11, info.data_processed)
self.assertEqual(12, info.data_remaining)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(1, info.disk_total)
self.assertEqual(2, info.disk_processed)
self.assertEqual(3, info.disk_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats_no_domain(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain with UUID blah",
fakelibvirt.VIR_ERR_NO_DOMAIN)
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_no_domain(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain with UUID blah",
fakelibvirt.VIR_ERR_NO_DOMAIN)
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats_operation_invalid(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Domain is not running",
fakelibvirt.VIR_ERR_OPERATION_INVALID)
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_operation_invalid(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Domain is not running",
fakelibvirt.VIR_ERR_OPERATION_INVALID)
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
|
apache-2.0
|
Mj258/weiboapi
|
srapyDemo/envs/Lib/site-packages/twisted/web/test/test_util.py
|
5
|
11900
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.util}.
"""
from __future__ import absolute_import, division
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from twisted.python.compat import _PY3, intToBytes, networkString
from twisted.web import resource, util
from twisted.web.error import FlattenerError
from twisted.web.http import FOUND
from twisted.web.server import Request
from twisted.web.template import TagLoader, flattenString, tags
from twisted.web.test.requesthelper import DummyChannel, DummyRequest
from twisted.web.util import DeferredResource
from twisted.web.util import _SourceFragmentElement, _FrameElement
from twisted.web.util import _StackElement, FailureElement, formatFailure
from twisted.web.util import redirectTo, _SourceLineElement
class RedirectToTests(TestCase):
"""
Tests for L{redirectTo}.
"""
def test_headersAndCode(self):
"""
L{redirectTo} will set the C{Location} and C{Content-Type} headers on
its request, and set the response code to C{FOUND}, so the browser will
be redirected.
"""
request = Request(DummyChannel(), True)
request.method = b'GET'
targetURL = b"http://target.example.com/4321"
redirectTo(targetURL, request)
self.assertEqual(request.code, FOUND)
self.assertEqual(
request.responseHeaders.getRawHeaders(b'location'), [targetURL])
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-type'),
[b'text/html; charset=utf-8'])
    def test_redirectToUnicodeURL(self):
        """
        L{redirectTo} raises L{TypeError} if a unicode object is passed as the URL.
        """
request = Request(DummyChannel(), True)
request.method = b'GET'
targetURL = u'http://target.example.com/4321'
self.assertRaises(TypeError, redirectTo, targetURL, request)
class FailureElementTests(TestCase):
"""
Tests for L{FailureElement} and related helpers which can render a
L{Failure} as an HTML string.
"""
def setUp(self):
"""
Create a L{Failure} which can be used by the rendering tests.
"""
def lineNumberProbeAlsoBroken():
message = "This is a problem"
raise Exception(message)
# Figure out the line number from which the exception will be raised.
self.base = lineNumberProbeAlsoBroken.__code__.co_firstlineno + 1
try:
lineNumberProbeAlsoBroken()
except:
self.failure = Failure(captureVars=True)
self.frame = self.failure.frames[-1]
def test_sourceLineElement(self):
"""
L{_SourceLineElement} renders a source line and line number.
"""
element = _SourceLineElement(
TagLoader(tags.div(
tags.span(render="lineNumber"),
tags.span(render="sourceLine"))),
50, " print 'hello'")
d = flattenString(None, element)
expected = (
u"<div><span>50</span><span>"
u" \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}print 'hello'</span></div>")
d.addCallback(
self.assertEqual, expected.encode('utf-8'))
return d
def test_sourceFragmentElement(self):
"""
L{_SourceFragmentElement} renders source lines at and around the line
number indicated by a frame object.
"""
element = _SourceFragmentElement(
TagLoader(tags.div(
tags.span(render="lineNumber"),
tags.span(render="sourceLine"),
render="sourceLines")),
self.frame)
source = [
u' \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}message = '
u'"This is a problem"',
u' \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}raise Exception(message)',
u'# Figure out the line number from which the exception will be '
u'raised.',
]
d = flattenString(None, element)
if _PY3:
stringToCheckFor = ''.join([
'<div class="snippet%sLine"><span>%d</span><span>%s</span>'
'</div>' % (
["", "Highlight"][lineNumber == 1],
self.base + lineNumber,
(u" \N{NO-BREAK SPACE}" * 4 + sourceLine))
for (lineNumber, sourceLine)
in enumerate(source)]).encode("utf8")
else:
stringToCheckFor = ''.join([
'<div class="snippet%sLine"><span>%d</span><span>%s</span>'
'</div>' % (
["", "Highlight"][lineNumber == 1],
self.base + lineNumber,
(u" \N{NO-BREAK SPACE}" * 4 + sourceLine).encode('utf8'))
for (lineNumber, sourceLine)
in enumerate(source)])
d.addCallback(self.assertEqual, stringToCheckFor)
return d
def test_frameElementFilename(self):
"""
The I{filename} renderer of L{_FrameElement} renders the filename
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="filename")),
self.frame)
d = flattenString(None, element)
d.addCallback(
# __file__ differs depending on whether an up-to-date .pyc file
# already existed.
self.assertEqual,
b"<span>" + networkString(__file__.rstrip('c')) + b"</span>")
return d
def test_frameElementLineNumber(self):
"""
The I{lineNumber} renderer of L{_FrameElement} renders the line number
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="lineNumber")),
self.frame)
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b"<span>" + intToBytes(self.base + 1) + b"</span>")
return d
def test_frameElementFunction(self):
"""
        The I{function} renderer of L{_FrameElement} renders the name of the function
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="function")),
self.frame)
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b"<span>lineNumberProbeAlsoBroken</span>")
return d
def test_frameElementSource(self):
"""
The I{source} renderer of L{_FrameElement} renders the source code near
the source filename/line number associated with the frame object used to
initialize the L{_FrameElement}.
"""
element = _FrameElement(None, self.frame)
renderer = element.lookupRenderMethod("source")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, _SourceFragmentElement)
self.assertIdentical(result.frame, self.frame)
self.assertEqual([tag], result.loader.load())
def test_stackElement(self):
"""
The I{frames} renderer of L{_StackElement} renders each stack frame in
the list of frames used to initialize the L{_StackElement}.
"""
element = _StackElement(None, self.failure.frames[:2])
renderer = element.lookupRenderMethod("frames")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], _FrameElement)
self.assertIdentical(result[0].frame, self.failure.frames[0])
self.assertIsInstance(result[1], _FrameElement)
self.assertIdentical(result[1].frame, self.failure.frames[1])
# They must not share the same tag object.
self.assertNotEqual(result[0].loader.load(), result[1].loader.load())
self.assertEqual(2, len(result))
def test_failureElementTraceback(self):
"""
The I{traceback} renderer of L{FailureElement} renders the failure's
stack frames using L{_StackElement}.
"""
element = FailureElement(self.failure)
renderer = element.lookupRenderMethod("traceback")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, _StackElement)
self.assertIdentical(result.stackFrames, self.failure.frames)
self.assertEqual([tag], result.loader.load())
def test_failureElementType(self):
"""
The I{type} renderer of L{FailureElement} renders the failure's
exception type.
"""
element = FailureElement(
self.failure, TagLoader(tags.span(render="type")))
d = flattenString(None, element)
if _PY3:
exc = b"builtins.Exception"
else:
exc = b"exceptions.Exception"
d.addCallback(
self.assertEqual, b"<span>" + exc + b"</span>")
return d
def test_failureElementValue(self):
"""
        The I{value} renderer of L{FailureElement} renders the failure's
        exception value.
"""
element = FailureElement(
self.failure, TagLoader(tags.span(render="value")))
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b'<span>This is a problem</span>')
return d
class FormatFailureTests(TestCase):
"""
Tests for L{twisted.web.util.formatFailure} which returns an HTML string
representing the L{Failure} instance passed to it.
"""
def test_flattenerError(self):
"""
If there is an error flattening the L{Failure} instance,
L{formatFailure} raises L{FlattenerError}.
"""
self.assertRaises(FlattenerError, formatFailure, object())
def test_returnsBytes(self):
"""
        The return value of L{formatFailure} is a C{bytes} instance (never
        C{unicode}) with numeric character references for any non-ASCII
        characters meant to appear in the output.
"""
try:
raise Exception("Fake bug")
except:
result = formatFailure(Failure())
self.assertIsInstance(result, bytes)
if _PY3:
self.assertTrue(all(ch < 128 for ch in result))
else:
self.assertTrue(all(ord(ch) < 128 for ch in result))
# Indentation happens to rely on NO-BREAK SPACE
self.assertIn(b" ", result)
class SDResource(resource.Resource):
def __init__(self,default):
self.default = default
def getChildWithDefault(self, name, request):
d = defer.succeed(self.default)
resource = util.DeferredResource(d)
return resource.getChildWithDefault(name, request)
class DeferredResourceTests(TestCase):
"""
Tests for L{DeferredResource}.
"""
def testDeferredResource(self):
r = resource.Resource()
r.isLeaf = 1
s = SDResource(r)
d = DummyRequest(['foo', 'bar', 'baz'])
resource.getChildForRequest(s, d)
self.assertEqual(d.postpath, ['bar', 'baz'])
def test_render(self):
"""
L{DeferredResource} uses the request object's C{render} method to
render the resource which is the result of the L{Deferred} being
handled.
"""
rendered = []
request = DummyRequest([])
request.render = rendered.append
result = resource.Resource()
deferredResource = DeferredResource(defer.succeed(result))
deferredResource.render(request)
self.assertEqual(rendered, [result])
|
mit
|
editxt/editxt
|
resources/syntax/scss.syntax.py
|
1
|
7181
|
# -*- coding: UTF-8 -*-
# Syntax definition automatically generated by hljs2xt.py
# source: scss.js
name = 'SCSS'
file_patterns = ['*.scss']
flags = re.IGNORECASE | re.MULTILINE
class comment:
default_text_color = DELIMITER
rules = [
# ignore {'begin': {'pattern': "\\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|like)\\b", 'type': 'RegExp'}},
('doctag', [RE(r"(?:TODO|FIXME|NOTE|BUG|XXX):")]),
]
selector_tag = [
RE(r"\b(?:a|abbr|acronym|address|area|article|aside|audio|b|base|big|blockquote|body|br|button|canvas|caption|cite|code|col|colgroup|command|datalist|dd|del|details|dfn|div|dl|dt|em|embed|fieldset|figcaption|figure|footer|form|frame|frameset|(?:h[1-6])|head|header|hgroup|hr|html|i|iframe|img|input|ins|kbd|keygen|label|legend|li|link|map|mark|meta|meter|nav|noframes|noscript|object|ol|optgroup|option|output|p|param|pre|progress|q|rp|rt|ruby|samp|script|section|select|small|span|strike|strong|style|sub|sup|table|tbody|td|textarea|tfoot|th|thead|time|title|tr|tt|ul|var|video)\b"),
]
variable = ('variable', [RE(r"(?:\$[a-zA-Z-][a-zA-Z0-9_-]*)\b")])
attribute = [
RE(r"\b(?:z-index|word-wrap|word-spacing|word-break|width|widows|white-space|visibility|vertical-align|unicode-bidi|transition-timing-function|transition-property|transition-duration|transition-delay|transition|transform-style|transform-origin|transform|top|text-underline-position|text-transform|text-shadow|text-rendering|text-overflow|text-indent|text-decoration-style|text-decoration-line|text-decoration-color|text-decoration|text-align-last|text-align|tab-size|table-layout|right|resize|quotes|position|pointer-events|perspective-origin|perspective|page-break-inside|page-break-before|page-break-after|padding-top|padding-right|padding-left|padding-bottom|padding|overflow-y|overflow-x|overflow-wrap|overflow|outline-width|outline-style|outline-offset|outline-color|outline|orphans|order|opacity|object-position|object-fit|normal|none|nav-up|nav-right|nav-left|nav-index|nav-down|min-width|min-height|max-width|max-height|mask|marks|margin-top|margin-right|margin-left|margin-bottom|margin|list-style-type|list-style-position|list-style-image|list-style|line-height|letter-spacing|left|justify-content|initial|inherit|ime-mode|image-orientation|image-resolution|image-rendering|icon|hyphens|height|font-weight|font-variant-ligatures|font-variant|font-style|font-stretch|font-size-adjust|font-size|font-language-override|font-kerning|font-feature-settings|font-family|font|float|flex-wrap|flex-shrink|flex-grow|flex-flow|flex-direction|flex-basis|flex|filter|empty-cells|display|direction|cursor|counter-reset|counter-increment|content|column-width|column-span|column-rule-width|column-rule-style|column-rule-color|column-rule|column-gap|column-fill|column-count|columns|color|clip-path|clip|clear|caption-side|break-inside|break-before|break-after|box-sizing|box-shadow|box-decoration-break|bottom|border-width|border-top-width|border-top-style|border-top-right-radius|border-top-left-radius|border-top-color|border-top|border-style|border-spacing|border-right-width|border-right-style|border-right-color|border-right|border-radius|border-left-width|border-left-style|border-left-color|border-left|border-image-width|border-image-source|border-image-slice|border-image-repeat|border-image-outset|border-image|border-color|border-collapse|border-bottom-width|border-bottom-style|border-bottom-right-radius|border-bottom-left-radius|border-bottom-color|border-bottom|border|background-size|background-repeat|background-position|background-origin|background-image|background-color|background-clip|background-attachment|background-blend-mode|background|backface-visibility|auto|animation-timing-function|animation-play-state|animation-name|animation-iteration-count|animation-fill-mode|animation-duration|animation-direction|animation-delay|animation|align-self|align-items|align-content)\b"),
]
number = ('number', [RE(r"#[0-9A-Fa-f]+")])
number0 = [
RE(r"\b\d+(?:\.\d+)?(?:%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?"),
]
number1 = ('number', number0)
operator_escape = ('operator.escape', [RE(r"\\[\s\S]")])
class string:
default_text_color = DELIMITER
rules = [operator_escape]
string0 = ('string', RE(r"\""), [RE(r"\"")], string)
string1 = ('string', RE(r"'"), [RE(r"'")], string)
class _group4:
default_text_color = DELIMITER
rules = [
variable,
number,
number1,
string0,
string1,
('meta', [RE(r"!important")]),
]
keyword = """
mixin include extend for if else each while charset import debug
media page content font-face namespace warn
""".split()
class _group5:
default_text_color = DELIMITER
rules = [
('keyword', keyword),
variable,
string0,
string1,
number,
number1,
# ignore {'begin': '\\s[A-Za-z0-9_.-]+', 'relevance': 0},
]
rules = [
('comment', RE(r"//"), [RE(r"$")], comment),
('comment', RE(r"/\*"), [RE(r"\*/")], comment),
('selector-id', [RE(r"\#[A-Za-z0-9_-]+")]),
('selector-class', [RE(r"\.[A-Za-z0-9_-]+")]),
('selector-attr', RE(r"\["), [RE(r"\]")]),
('selector-tag', selector_tag),
# ignore {'begin': ':(visited|valid|root|right|required|read-write|read-only|out-range|optional|only-of-type|only-child|nth-of-type|nth-last-of-type|nth-last-child|nth-child|not|link|left|last-of-type|last-child|lang|invalid|indeterminate|in-range|hover|focus|first-of-type|first-line|first-letter|first-child|first|enabled|empty|disabled|default|checked|before|after|active)'},
# ignore {'begin': '::(after|before|choices|first-letter|first-line|repeat-index|repeat-item|selection|value)'},
variable,
('attribute', attribute),
# ignore {'begin': '\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b'},
('_group4', RE(r":"), [RE(r";")], _group4),
('_group5', RE(r"@"), [RE(r"[{;]")], _group5),
]
|
gpl-3.0
|
JeanKossaifi/scikit-learn
|
sklearn/neighbors/nearest_centroid.py
|
199
|
7249
|
# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, as that makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
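    # Sketch of the shrinkage step above in formula form (a summary of the
    # code, following Tibshirani et al., 2002; not additional behaviour):
    # with class centroid xbar_k, overall centroid xbar, m_k = sqrt(1/n_k + 1/n)
    # and within-class deviation s_j (plus s_0 = median(s)), the code forms
    #
    #     d_kj = (xbar_kj - xbar_j) / (m_k * (s_j + s_0))
    #
    # and soft-thresholds it with Delta = shrink_threshold,
    #
    #     d'_kj = sign(d_kj) * max(|d_kj| - Delta, 0)
    #
    # giving the shrunken centroid xbar_j + m_k * (s_j + s_0) * d'_kj.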
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
|
bsd-3-clause
|
willhardy/django
|
django/db/backends/oracle/compiler.py
|
59
|
2044
|
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
"""
Creates the SQL for this query. Returns the SQL string and list
of parameters. This is overridden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset with Oracle.
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
if not do_offset:
sql, params = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=with_col_aliases,
subquery=subquery,
)
else:
sql, params = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=True,
subquery=subquery,
)
# Wrap the base query in an outer SELECT * with boundaries on
# the "_RN" column. This is the canonical way to emulate LIMIT
# and OFFSET on Oracle.
high_where = ''
if self.query.high_mark is not None:
high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
sql = (
'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
'"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
)
return sql, params
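    # Illustrative sketch only (the exact inner SQL depends on the query):
    # with low_mark=20 and high_mark=30, the wrapping above produces SQL of
    # roughly this shape, where "_RN" bounds the requested window:
    #
    #   SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (<inner query>)
    #   "_SUB" WHERE ROWNUM <= 30) WHERE "_RN" > 20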
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
|
bsd-3-clause
|
nysan/yocto-autobuilder
|
lib/python2.6/site-packages/buildbot-0.8.4p1-py2.6.egg/buildbot/cache.py
|
4
|
2588
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.util import lru
class CacheManager(object):
"""
A manager for a collection of caches, each for different types of objects
and with potentially-overlapping key spaces.
There is generally only one instance of this class, available at
C{master.caches}.
"""
# a cache of length one still has many benefits: it collects objects that
    # remain referenced elsewhere; it collapses simultaneous misses into a single
    # call to the miss function; and it optimizes repeated fetches of the same object.
DEFAULT_CACHE_SIZE = 1
def __init__(self):
self.config = {}
self._caches = {}
def get_cache(self, cache_name, miss_fn):
"""
Get an L{AsyncLRUCache} object with the given name. If such an object
does not exist, it will be created. Since the cache is permanent, this
method can be called only once, e.g., in C{startService}, and it value
stored indefinitely.
@param cache_name: name of the cache (usually the name of the type of
object it stores)
@param miss_fn: miss function for the cache; see L{AsyncLRUCache}
constructor.
@returns: L{AsyncLRUCache} instance
"""
try:
return self._caches[cache_name]
except KeyError:
max_size = self.config.get(cache_name, self.DEFAULT_CACHE_SIZE)
assert max_size >= 1
c = self._caches[cache_name] = lru.AsyncLRUCache(miss_fn, max_size)
return c
def load_config(self, new_config):
self.config = new_config
for name, cache in self._caches.iteritems():
cache.set_max_size(new_config.get(name, self.DEFAULT_CACHE_SIZE))
def get_metrics(self):
return dict([
(n, dict(hits=c.hits, refhits=c.refhits, misses=c.misses))
for n, c in self._caches.iteritems()])
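# Illustrative usage sketch (not part of the original module; the names below
# are invented for the example).  The miss function returns a Deferred, and
# the cache collapses simultaneous lookups of the same key:
#
#     from twisted.internet import defer
#
#     def get_build(bid):                  # normally a database lookup
#         return defer.succeed({'id': bid})
#
#     caches = CacheManager()
#     caches.load_config({'Builds': 50})
#     builds = caches.get_cache('Builds', get_build)
#     d = builds.get(17)                   # Deferred firing with {'id': 17}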
|
gpl-2.0
|
liveaverage/baruwa
|
src/baruwa/messages/management/commands/updatesarules.py
|
1
|
2104
|
#
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# vim: ai ts=4 sts=4 et sw=4
#
import re
import os
import glob
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from django.db import IntegrityError
from django.conf import settings
from baruwa.messages.models import SaRules
class Command(NoArgsCommand):
"update the rules table"
    help = _("Updates the database with the SpamAssassin rule descriptions")
def handle_noargs(self, **options):
search_dirs = getattr(settings, 'SA_RULES_DIRS', [])
regex = re.compile(r'^describe\s+(\S+)\s+(.+)$')
for dirc in search_dirs:
if not dirc.endswith(os.sep):
dirc = dirc + os.sep
for the_file in glob.glob(dirc + '*.cf'):
rule_file = open(the_file, 'r')
for line in rule_file.readlines():
match = regex.match(line)
if match:
print match.groups()[0] + ' ' + match.groups()[1]
rule = SaRules(rule=match.groups()[0],
rule_desc=match.groups()[1])
try:
rule.save()
except IntegrityError:
pass
rule_file.close()
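# Example of the lines this command consumes (rule name and text shown here
# are only illustrative of what the regex above captures):
#
#     describe SUBJ_ALL_CAPS Subject is all capitals
#
# match.groups() is then ('SUBJ_ALL_CAPS', 'Subject is all capitals'), which
# becomes the rule/rule_desc pair stored in SaRules.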
|
gpl-2.0
|
shikil/sympy
|
sympy/utilities/tests/test_pytest.py
|
105
|
1601
|
from sympy.utilities.pytest import raises, USE_PYTEST
if USE_PYTEST:
import py.test
pytestmark = py.test.mark.skipif(USE_PYTEST,
reason=("using py.test"))
# Test callables
def test_expected_exception_is_silent_callable():
def f():
raise ValueError()
raises(ValueError, f)
def test_lack_of_exception_triggers_AssertionError_callable():
try:
raises(Exception, lambda: 1 + 1)
assert False
except AssertionError as e:
assert str(e) == "DID NOT RAISE"
def test_unexpected_exception_is_passed_through_callable():
def f():
raise ValueError("some error message")
try:
raises(TypeError, f)
assert False
except ValueError as e:
assert str(e) == "some error message"
# Test with statement
def test_expected_exception_is_silent_with():
with raises(ValueError):
raise ValueError()
def test_lack_of_exception_triggers_AssertionError_with():
try:
with raises(Exception):
1 + 1
assert False
except AssertionError as e:
assert str(e) == "DID NOT RAISE"
def test_unexpected_exception_is_passed_through_with():
try:
with raises(TypeError):
raise ValueError("some error message")
assert False
except ValueError as e:
assert str(e) == "some error message"
# Now we can use raises() instead of try/catch
# to test that a specific exception class is raised
def test_second_argument_should_be_callable_or_string():
raises(TypeError, lambda: raises("irrelevant", 42))
|
bsd-3-clause
|
LeartS/odoo
|
openerp/report/render/rml2txt/utils.py
|
443
|
4710
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import copy
import re
import reportlab
import reportlab.lib.units
from openerp.tools.safe_eval import safe_eval as eval
_regex = re.compile('\[\[(.+?)\]\]')
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop', False):
oldctx = self.localcontext
for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
self.localcontext.update(ctx)
if (tagname is None) or (n.tag==tagname):
if n.get('rml_except', False):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except Exception:
continue
if n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.copy(n)
n2.tag = tag
n2.attrib.update(attr)
yield n2
except Exception:
yield n
else:
yield n
self.localcontext = oldctx
continue
if self and self.localcontext and n.get('rml_except', False):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except Exception:
continue
if (tagname is None) or (n.tag==tagname):
yield n
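# Summary of the generator above (descriptive only, no extra behaviour):
# a node carrying rml_loop is yielded once per context dict produced by
# evaluating that expression, with self.localcontext updated for each pass;
# a node whose rml_except expression raises is skipped; and rml_tag lets a
# node replace its own tag name and attributes before being yielded.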
def _process_text(self, txt):
if not self.localcontext:
return txt
if not txt:
return ''
result = ''
sps = _regex.split(txt)
while sps:
# This is a simple text to translate
result += self.localcontext.get('translate', lambda x:x)(sps.pop(0))
if sps:
try:
txt2 = eval(sps.pop(0),self.localcontext)
except Exception:
txt2 = ''
if isinstance(txt2, (int, float)):
txt2 = str(txt2)
if isinstance(txt2, basestring):
result += txt2
return result
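# Illustrative example (values invented): with self.localcontext containing
# {'name': 'Agrolait'} and txt = 'Customer: [[ name ]]', _regex.split() gives
# ['Customer: ', ' name ', '']; the even chunks go through the 'translate'
# callable and the odd chunks are eval()'d against the local context, so the
# result is 'Customer: Agrolait'.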
def text_get(node):
rc = ''
for node in node.getchildren():
rc = rc + node.text
return rc
units = [
(re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
(re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
(re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
(re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
global units
if size:
for unit in units:
res = unit[0].search(size, 0)
if res:
return unit[1]*float(res.group(1))
return False
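# Illustrative conversions (approximate point values, assuming reportlab's
# standard unit constants):
#
#     unit_get('1in')  ->  72.0
#     unit_get('2cm')  ->  2 * reportlab.lib.units.cm   (about 56.7)
#     unit_get('10')   ->  10.0
#     unit_get(None)   ->  False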
def tuple_int_get(node, attr_name, default=None):
if not node.get(attr_name):
return default
res = [int(x) for x in node.get(attr_name).split(',')]
return res
def bool_get(value):
return (str(value)=="1") or (value.lower()=='yes')
def attr_get(node, attrs, dict=None):
if dict is None:
dict = {}
res = {}
for name in attrs:
if node.get(name):
res[name] = unit_get(node.get(name))
for key in dict:
if node.get(key):
if dict[key]=='str':
res[key] = str(node.get(key))
elif dict[key]=='bool':
res[key] = bool_get(node.get(key))
elif dict[key]=='int':
res[key] = int(node.get(key))
elif dict[key]=='unit':
res[key] = unit_get(node.get(key))
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
andrescodas/casadi
|
test/python/function.py
|
1
|
39929
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import casadi as c
import numpy
import unittest
from types import *
from helpers import *
scipy_interpolate = False
try:
import scipy.interpolate
scipy.interpolate.RectBivariateSpline
scipy_interpolate = True
except:
pass
class Functiontests(casadiTestCase):
def test_call_empty(self):
x = SX.sym("x",2)
fsx = Function("fsx", [x,[]],[x])
x = MX.sym("x",2)
fmx1 = Function("fmx1", [x,MX()],[x])
fmx2 = Function("fmx2", [x,[]],[x])
for f in [fsx,fmx1,fmx2]:
f(0,0)
X = MX.sym("X",2)
F = f(X,MX())
g = Function("g", [X],[F])
g(0)
x = SX.sym("x",2)
fsx = Function("fsx", [x],[x,[]])
x = MX.sym("x",2)
fmx1 = Function("fmx1", [x],[x,MX()])
fmx2 = Function("fmx2", [x],[x,[]])
for f in [fsx,fmx1,]:
f(0)
X = MX.sym("X",2)
F = f(X)
g = Function("g", [X],F)
g(0)
def test_MX_funSeed(self):
self.message("MX_funSeed")
x1 = MX.sym("x",2)
y1 = MX.sym("y")
x2 = MX.sym("x",2)
y2 = MX.sym("y")
p= Function("p", [x1,y1,x2,y2],[sin(x1) + y1,sin(x2) + y2])
n1 = DM([4,5])
N1 = 3
n2 = DM([5,7])
N2 = 8
out = p(n1,N1,n2,N2)
self.checkarray(sin(n1)+N1,out[0],"output")
self.checkarray(sin(n2)+N2,out[1],"output")
def test_issue304(self):
self.message("regression test for #304") # this code used to segfault
x = SX.sym("x")
f = Function("f", [x],[x**2,x**3])
X = MX.sym("X")
z=f(X)
g = Function("g", [X], z).expand()
def test_jacobian(self):
x = SX.sym("x",3,1)
y = SX.sym("y",2,1)
f = Function("f", [x,y],[x**2,y,x*y[0]])
g = f.jacobian_old(0, 0)
self.assertEqual(g.n_in(),f.n_in())
self.assertEqual(g.n_out(),f.n_out()+1)
def test_xfunction(self):
x = SX.sym("x",3,1)
y = SX.sym("y",2,1)
f = Function("f", [x,y],[x**2,y,x*y[0]])
X = MX.sym("x",3,1)
Y = MX.sym("y",2,1)
F = Function("F", [X,Y],[X**2,Y,X*Y[0]])
self.checkfunction(f,F,inputs=[[0.1,0.7,1.3],[7.1,2.9]],sens_der=False,evals=False)
@memory_heavy()
def test_jacobians(self):
x = SX.sym("x")
self.assertEqual(jacobian(5,x).nnz(),0)
def test(sp):
x = SX.sym("x",sp.size2())
sp2 = jacobian(mtimes(DM.ones(sp),x),x).sparsity()
self.checkarray(sp.row(),sp2.row());
self.checkarray(sp.colind(),sp2.colind());
for i in range(5):
test(Sparsity.lower(i))
test(Sparsity.lower(i).T)
test(Sparsity.dense(i,i))
test(Sparsity.diag(i))
for i in [63,64,65,127,128,129]:
d = Sparsity.diag(i)
test(d)
test(d + Sparsity.rowcol([0],[5],i,i))
b = Sparsity.band(i,-1) + Sparsity.band(i,1)
test(b + Sparsity.rowcol([0],[5],i,i))
m = IM.ones(Sparsity.diag(129))
m[:50,0] = 1
m[60:,0] = 1
m[6:9,6] = 1
m[9,9:12] = 1
sp = m[:,:120].sparsity()
test(sp)
#test(sp.T)
m = IM.ones(Sparsity.diag(64))
m[:50,0] = 1
m[60:,0] = 1
sp = m.T[:40,:].sparsity()
test(sp)
test(sp.T)
sp = m[:40,:].sparsity()
test(sp)
test(sp.T)
sp = m.T[:20,:].sparsity()
test(sp)
test(sp.T)
sp = m[:20,:].sparsity()
test(sp)
test(sp.T)
for i in [63,64,65,127,128,129]:
test(Sparsity.lower(i))
test(Sparsity.lower(i).T)
for n in ([63,64,65,127,128,129] if args.run_slow else [63,64,65]):
for m in ([63,64,65,127,128,129] if args.run_slow else [63,64,65]):
print((n,m))
sp = Sparsity.dense(n,m)
test(sp)
random.seed(0)
I = IM.ones(sp)
for i in range(n):
for j in range(m):
if random.random()<0.5:
I[i,j] = 0
I = sparsify(I)
sp_holes = I.sparsity()
test(sp_holes)
z = IM(sp_holes.size1(), sp_holes.size2())
R = 5
v = []
for r in range(R):
h = [z]*5
h[r] = I
v.append(horzcat(*h))
d = vertcat(*v)
test(d.sparsity())
@memory_heavy()
def test_hessians(self):
def test(sp):
x = SX.sym("x",sp.size2())
self.assertTrue(sp==sp.T)
f = Function("f", [x],[mtimes([x.T,DM.ones(sp),x])])
J = f.hessian_old(0, 0)
sp2 = J.sparsity_out(0)
self.checkarray(sp.row(),sp2.row())
self.checkarray(sp.colind(),sp2.colind())
A = IM([[1,1,0,0,0,0],[1,1,1,0,1,1],[0,1,1,1,0,0],[0,0,1,1,0,1],[0,1,0,0,1,0],[0,1,0,1,0,1]])
A = sparsify(A)
C = A.sparsity()
test(C)
A = IM([[1,0,0,0,0,0],[0,1,1,0,1,1],[0,1,1,1,0,0],[0,0,1,1,0,1],[0,1,0,0,1,0],[0,1,0,1,0,1]])
A = sparsify(A)
C = A.sparsity()
test(C)
A = IM([[1,0,0,0,0,0],[0,1,0,0,1,1],[0,0,1,1,0,0],[0,0,1,1,0,1],[0,1,0,0,1,0],[0,1,0,1,0,1]])
A = sparsify(A)
C = A.sparsity()
test(C)
A = IM([[0,0,0,0,0,0],[0,1,0,0,1,1],[0,0,1,1,0,0],[0,0,1,1,0,1],[0,1,0,0,1,0],[0,1,0,1,0,1]])
A = sparsify(A)
C = A.sparsity()
test(C)
A = IM([[0,0,0,0,0,0],[0,1,0,0,1,0],[0,0,1,1,0,0],[0,0,1,1,0,1],[0,1,0,0,1,0],[0,0,0,1,0,1]])
A = sparsify(A)
C = A.sparsity()
test(C)
for i in [63,64,65,100,127,128,129]:
d = Sparsity.diag(i)
test(d)
test(d + Sparsity.rowcol([0],[5],i,i) + Sparsity.rowcol([5],[0],i,i))
b = Sparsity.band(i,-1) + Sparsity.band(i,1)
test(b)
test(b + Sparsity.rowcol([0],[5],i,i) + Sparsity.rowcol([5],[0],i,i))
d = Sparsity.dense(i,i)
test(d)
d = Sparsity.diag(i) + Sparsity.triplet(i,i,[0]*i,list(range(i)))+Sparsity.triplet(i,i,list(range(i)),[0]*i)
test(d)
sp = Sparsity.dense(i,i)
random.seed(0)
I = IM.ones(sp)
for ii in range(i):
for jj in range(i):
if random.random()<0.5:
I[ii,jj] = 0
I[jj,ii] = 0
I = sparsify(I)
sp_holes = I.sparsity()
test(sp_holes)
z = IM(sp_holes.size1(), sp_holes.size2())
R = 5
v = []
for r in range(R):
h = [z]*5
h[r] = I
v.append(horzcat(*h))
d = vertcat(*v)
test(d.sparsity())
def test_customIO(self):
x = SX.sym("x")
f = Function('f',[x],[x*x, x],["i0"], ["foo","bar"])
ret = f(i0=12)
self.checkarray(DM([144]),ret["foo"])
self.checkarray(DM([12]),ret["bar"])
with self.assertRaises(Exception):
      ret["baz"]
ret = f(i0=SX(12))
self.checkarray(ret["foo"],DM([144]))
self.checkarray(ret["bar"],DM([12]))
with self.assertRaises(Exception):
self.checkarray(ret["baz"],DM([12]))
def test_derivative_simplifications(self):
n = 1
x = SX.sym("x",n)
M = Function("M", [x],[mtimes((x-DM(list(range(n)))),x.T)])
P = MX.sym("P",n,n)
X = MX.sym("X",n)
M_X= M(X)
Pf = Function("P", [X, P], [mtimes(M_X,P)])
P_P = Pf.jacobian_old(1, 0)
self.assertFalse("derivative" in str(P_P))
def test_issue1464(self):
n = 6
x = SX.sym("x",n)
u = SX.sym("u")
N = 9
rk4 = Function("f",[x,u],[x+u])
for XX,XFunction in [(SX,Function),(MX,Function)]:
g = []
g2 = []
V = XX.sym("V",(N+1)*n+N)
VX,VU = vertsplit(V,[0,(N+1)*n,(N+1)*n+N])
VXk = vertsplit(VX,n)
VUk = vertsplit(VU,1)
for k in range(N):
xf = rk4(VXk[k],VUk[k])
xfp = vertsplit(xf,int(n/2))
vp = vertsplit(VXk[k+1],int(n/2))
g.append(xfp[0] - vp[0])
g.append(xfp[1] - vp[1])
g2.append(xf-VXk[k+1])
for i in range(2):
f = XFunction("nlp",[V],[vertcat(*g)],{"ad_weight_sp":i})
assert f.sparsity_jac(0, 0).nnz()==162
f2 = XFunction("nlp",[V],[vertcat(*g2)],{"ad_weight_sp":i})
assert f2.sparsity_jac(0, 0).nnz()==162
def test_callback(self):
class mycallback(Callback):
def __init__(self, name, opts={}):
Callback.__init__(self)
self.construct(name, opts)
def eval(self,argin):
return [argin[0]**2]
foo = mycallback("my_f")
x = MX.sym('x')
y = foo(x)
f = Function("f",[x],[y])
out = f(5)
self.checkarray(out,25)
def test_callback_errors(self):
class mycallback(Callback):
def __init__(self, name, opts={}):
Callback.__init__(self)
self.construct(name, opts)
def eval(self,argin):
raise Exception("foobar")
foo = mycallback("my_f")
x = MX.sym('x')
y = foo(x)
f = Function("f",[x],[y])
try:
f(3)
except Exception as e:
self.assertTrue("foobar" in str(e))
def test_mapdict(self):
x = SX.sym("x")
y = SX.sym("y",2)
z = SX.sym("z",2,2)
v = SX.sym("z",Sparsity.upper(3))
fun = Function("f",{"x":x,"y":y,"z":z,"v":v,"I":mtimes(z,y)+x,"II":sin(y*x).T,"III":v/x},["x","y","z","v"],["I","II","III"])
n = 2
X = [MX.sym("x") for i in range(n)]
Y = [MX.sym("y",2) for i in range(n)]
Z = [MX.sym("z",2,2) for i in range(n)]
V = [MX.sym("z",Sparsity.upper(3)) for i in range(n)]
res = fun.map(n).call({"x":horzcat(*X),"y":horzcat(*Y),"z":horzcat(*Z),"v":horzcat(*V)})
res2 = fun.map(n).call([horzcat(*X),horzcat(*Y),horzcat(*Z),horzcat(*V)])
F = Function("F",X+Y+Z+V,res2)
F2 = Function("F",X+Y+Z+V,[res["I"],res["II"],res["III"]])
np.random.seed(0)
X_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in X ]
Y_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Y ]
Z_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Z ]
V_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in V ]
self.checkfunction(F,F2,inputs=X_+Y_+Z_+V_,jacobian=False,hessian=False,evals=False)
@memory_heavy()
def test_map_node(self):
x = SX.sym("x")
y = SX.sym("y",2)
z = SX.sym("z",2,2)
v = SX.sym("z",Sparsity.upper(3))
fun = Function("f",[x,y,z,v],[mtimes(z,y)+x,sin(y*x).T,v/x])
n = 2
X = [MX.sym("x") for i in range(n)]
Y = [MX.sym("y",2) for i in range(n)]
Z = [MX.sym("z",2,2) for i in range(n)]
V = [MX.sym("z",Sparsity.upper(3)) for i in range(n)]
for parallelization in ["serial","openmp","unroll"] if args.run_slow else ["serial"]:
print(parallelization)
res = fun.map(n, parallelization).call([horzcat(*x) for x in [X,Y,Z,V]])
F = Function("F",X+Y+Z+V,list(map(sin,res)))
resref = [[] for i in range(fun.n_out())]
for r in zip(X,Y,Z,V):
for i,e in enumerate(map(sin,fun.call(r))):
resref[i] = resref[i] + [e]
Fref = Function("F",X+Y+Z+V,[horzcat(*x) for x in resref])
np.random.seed(0)
X_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in X ]
Y_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Y ]
Z_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Z ]
V_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in V ]
for f in [F, F.expand('expand_'+F.name())]:
self.checkfunction(f,Fref,inputs=X_+Y_+Z_+V_,sparsity_mod=args.run_slow)
@memory_heavy()
def test_mapsum(self):
x = SX.sym("x")
y = SX.sym("y",2)
z = SX.sym("z",2,2)
v = SX.sym("z",Sparsity.upper(3))
fun = Function("f",[x,y,z,v],[mtimes(z,y)+x,sin(y*x).T,v/x])
n = 2
X = [MX.sym("x") for i in range(n)]
Y = [MX.sym("y",2) for i in range(n)]
Z = [MX.sym("z",2,2) for i in range(n)]
V = [MX.sym("z",Sparsity.upper(3)) for i in range(n)]
zi = 0
for Z_alt in [Z,[MX()]*3]:
zi+= 1
for parallelization in ["serial","openmp","unroll"]:
res = fun.mapsum([horzcat(*x) for x in [X,Y,Z_alt,V]],parallelization) # Joris - clean alternative for this?
for ad_weight_sp in [0,1]:
F = Function("F",X+Y+Z+V,list(map(sin,res)),{"ad_weight": 0,"ad_weight_sp":ad_weight_sp})
resref = [0 for i in range(fun.n_out())]
for r in zip(X,Y,Z_alt,V):
for i,e in enumerate(fun.call(r)):
resref[i] = resref[i] + e
Fref = Function("F",X+Y+Z+V,list(map(sin,resref)))
np.random.seed(0)
X_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in X ]
Y_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Y ]
Z_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Z ]
V_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in V ]
inputs = X_+Y_+Z_+V_
self.check_codegen(F,inputs=inputs)
for f in [F,toSX_fun(F)]:
self.checkfunction(f,Fref,inputs=inputs,sparsity_mod=args.run_slow)
@memory_heavy()
def test_mapsum2(self):
x = SX.sym("x")
y = SX.sym("y",2)
z = SX.sym("z",2,2)
v = SX.sym("z",Sparsity.upper(3))
fun = Function("f",[x,y,z,v],[mtimes(z,y)+x,sin(y*x).T,v/x])
n = 2
X = [MX.sym("x") for i in range(n)]
Y = [MX.sym("y",2) for i in range(n)]
Z = MX.sym("z",2,2)
V = MX.sym("z",Sparsity.upper(3))
for Z_alt in [Z]:
for parallelization in ["serial","openmp","unroll"]:
for ad_weight_sp in [0,1]:
for ad_weight in [0,1]:
F = fun.map("map",parallelization,n,[2,3],[0],{"ad_weight_sp":ad_weight_sp,"ad_weight":ad_weight})
resref = [0 for i in range(fun.n_out())]
acc = 0
bl = []
cl = []
for r in zip(X,Y,[Z_alt]*n,[V]*n):
a,b,c= fun(*r)
acc = acc + a
bl.append(b)
cl.append(c)
Fref = Function("F",[horzcat(*X),horzcat(*Y),Z,V],[acc,horzcat(*bl),horzcat(*cl)])
np.random.seed(0)
X_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in X ]
Y_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Y ]
Z_ = DM(Z.sparsity(),np.random.random(Z.nnz()))
V_ = DM(V.sparsity(),np.random.random(V.nnz()))
inputs = [horzcat(*X_),horzcat(*Y_),Z_,V_]
self.check_codegen(F,inputs=inputs)
for f in [F,toSX_fun(F)]:
self.checkfunction(f,Fref,inputs=inputs,sparsity_mod=args.run_slow)
def test_issue1522(self):
V = MX.sym("X",2)
P = MX.sym("X",0)
x = V[0]
y = V[1]
obj = (x-(x+y))**2
nlp = Function("nlp", [V, P], [obj, MX()], ['x', 'p'], ['f', 'g'])
self.assertTrue(nlp.hessian_old(0,0).sparsity_out(0).is_symmetric())
V = MX.sym("X",6)
xs = [ V[0:2], V[2:4] ]
travels = [ V[4], V[5] ]
dist = 0
for j in range(2):
dist+=sum1((xs[0]-(xs[j]+travels[j]))**2)
nlp = Function("nlp", [V, P], [-dist, MX()], ['x', 'p'], ['f', 'g'])
hs = []
for n in [nlp, nlp.expand('nlp_expanded')]:
H = n.reverse(1).jacobian_old(0,0,False,True)
h = H(der_x=1,adj_f=1)[H.name_out(0)]
hs.append(h)
self.checkarray(*hs)
def test_repmatnode(self):
x = MX.sym("x",2)
y = sin(repmat(x**2,1,3))
z = MX.sym("y",2,2)
F = Function("f",[x,z],[sum2(sum1(y))])
x = SX.sym("x",2)
y = sin(repmat(x**2,1,3))
z = SX.sym("y",2,2)
Fref = Function("f",[x,z],[sum2(sum1(y))])
x0 = DM([1,7])
x1 = DM([[3,0],[2,4]])
self.check_codegen(F,inputs=[x0,x1])
self.checkfunction(F,Fref,inputs=[x0,x1])
def test_repsumnode(self):
x = MX.sym("x",2)
z = MX.sym("y",2,2)
F = Function("f",[x,z],[sin(repsum((x**2).T,1,2)),(cos(x**2)*2*x).T])
x = SX.sym("x",2)
z = SX.sym("y",2,2)
Fref = Function("f",[x,z],[sin(repsum((x**2).T,1,2)),(cos(x**2)*2*x).T])
x0 = DM([1,7])
x1 = DM([[3,0],[2,4]])
self.check_codegen(F,inputs=[x0,x1])
self.checkfunction(F,Fref,inputs=[x0,x1])
def test_unknown_options(self):
x = SX.sym("x")
with self.assertRaises(Exception):
f = SXFunction("f", [x],[x],{"fooo": False})
with self.assertRaises(Exception):
f = SXFunction("f", [x],[x],{"ad_weight": "foo"})
if not has_nlpsol("ipopt"):
return
@known_bug()
def test_unknown_options_stringvector(self):
x = SX.sym("x")
solver = nlpsol("mysolver", "ipopt", {"x":x,"f":x**2}, {"monitor": ["eval_f"]})
with self.assertRaises(Exception):
solver = nlpsol("mysolver", "ipopt", {"x":x,"f":x**2}, {"monitor": ["abc"]})
@memory_heavy()
def test_mapaccum(self):
x = SX.sym("x",2)
y = SX.sym("y")
z = SX.sym("z",2,2)
v = SX.sym("v",Sparsity.upper(3))
fun = Function("f",[x,y,z,v],[mtimes(z,x)+y,sin(y*x).T,v/y])
n = 2
X = MX.sym("x",x.sparsity())
Y = [MX.sym("y",y.sparsity()) for i in range(n)]
Z = [MX.sym("z",z.sparsity()) for i in range(n)]
V = [MX.sym("v",v.sparsity()) for i in range(n)]
np.random.seed(0)
X_ = DM(x.sparsity(),np.random.random(x.nnz()))
Y_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Y ]
Z_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in Z ]
V_ = [ DM(i.sparsity(),np.random.random(i.nnz())) for i in V ]
for ad_weight in range(2):
for ad_weight_sp in range(2):
F = fun.mapaccum("map",n,[0],[0],{"ad_weight_sp":ad_weight_sp,"ad_weight": ad_weight})
F.forward(2)
XP = X
Y0s = []
Y1s = []
Xps = []
for k in range(n):
XP, Y0,Y1 = fun(XP,Y[k],Z[k],V[k])
Y0s.append(Y0)
Y1s.append(Y1)
Xps.append(XP)
Fref = Function("f",[X,horzcat(*Y),horzcat(*Z),horzcat(*V)],[horzcat(*Xps),horzcat(*Y0s),horzcat(*Y1s)])
inputs = [X_,horzcat(*Y_),horzcat(*Z_),horzcat(*V_)]
for f in [F,toSX_fun(F)]:
self.checkfunction(f,Fref,inputs=inputs)
self.check_codegen(f,inputs=inputs)
fun = Function("f",[y,x,z,v],[mtimes(z,x)+y+c.trace(v)**2,sin(y*x).T,v/y])
for ad_weight in range(2):
for ad_weight_sp in range(2):
F = fun.mapaccum("map",n,[1,3],[0,2],{"ad_weight_sp":ad_weight_sp,"ad_weight": ad_weight})
XP = X
VP = V[0]
Y0s = []
Y1s = []
Xps = []
Vps = []
for k in range(n):
XP, Y0, VP = fun(Y[k],XP,Z[k],VP)
Y0s.append(Y0)
Xps.append(XP)
Vps.append(VP)
Fref = Function("f",[horzcat(*Y),X,horzcat(*Z),V[0]],[horzcat(*Xps),horzcat(*Y0s),horzcat(*Vps)])
inputs = [horzcat(*Y_),X_,horzcat(*Z_),V_[0]]
for f in [F,toSX_fun(F)]:
self.checkfunction(f,Fref,inputs=inputs)
self.check_codegen(f,inputs=inputs)
def test_mapaccum_schemes(self):
x = SX.sym("x",2)
y = SX.sym("y")
z = SX.sym("z",2,2)
v = SX.sym("v",Sparsity.upper(3))
fun = Function("f",[y,z,x,v],[mtimes(z,x)+y,sin(y*x).T,v/y],["y","z","x","v"],["out0","out1","out2"])
n = 2
F = fun.mapaccum("map",n,[2],[0])
scheme_in_fun = fun.name_in()
scheme_out_fun = fun.name_out()
scheme_in_F = F.name_in()
scheme_out_F = F.name_out()
self.assertTrue(len(scheme_in_fun),len(scheme_in_F))
self.assertTrue(len(scheme_out_fun),len(scheme_out_F))
for sf,sF in zip(scheme_in_fun,scheme_in_F):
self.assertTrue(sf==sF)
for sf,sF in zip(scheme_out_fun,scheme_out_F):
self.assertTrue(sf==sF)
fun = Function("f",[x,y,z,v],[mtimes(z,x)+y,sin(y*x).T,v/y],["x","y","z","v"],["out0","out1","out2"])
n = 2
F = fun.mapaccum("map",n)
self.assertTrue(len(scheme_in_fun),len(scheme_in_F))
self.assertTrue(len(scheme_out_fun),len(scheme_out_F))
for sf,sF in zip(scheme_in_fun,scheme_in_F):
self.assertTrue(sf==sF)
for sf,sF in zip(scheme_out_fun,scheme_out_F):
self.assertTrue(sf==sF)
# @requiresPlugin(Importer,"clang")
# def test_jitfunction_clang(self):
# x = MX.sym("x")
# F = Function("f",[x],[x**2],{'jit':True})
# out = F([5])
# self.checkarray(out[0],25)
# @requiresPlugin(Importer,"clang")
# def test_clang_c(self):
# compiler = Importer('../data/helloworld.c', 'clang')
# f = external("helloworld_c", compiler)
# [v] = f([])
# self.checkarray(2.37683, v, digits=4)
# @requiresPlugin(Importer,"clang")
# def test_clang_cxx(self):
# compiler = Importer('../data/helloworld.cxx', 'clang')
# f = external("helloworld_cxx", compiler)
# [v] = f([])
# self.checkarray(2.37683, v, digits=4)
# @requiresPlugin(Importer,"shell")
# def test_shell_c(self):
# compiler = Importer('../data/helloworld.c', 'shell')
# f = external("helloworld_c", compiler)
# [v] = f([])
# self.checkarray(2.37683, v, digits=4)
# @requiresPlugin(Importer,"shell")
# def test_shell_cxx(self):
# opts = {'compiler':'g++'}
# compiler = Importer('../data/helloworld.cxx', 'shell', opts)
# f = external("helloworld_cxx", compiler)
# [v] = f([])
# self.checkarray(2.37683, v, digits=4)
def test_depends_on(self):
x = SX.sym("x")
y = x**2
try:
depends_on(x,y)
except Exception as e:
s = str(e)
self.assertTrue("not symbolic" in s)
try:
Function("f",[y],[x])
except Exception as e:
s = str(e)
self.assertTrue("not symbolic" in s)
def test_1d_interpolant(self):
grid = [[0, 1, 1.5, 2, 3]]
values = [0, 1, 2, 5, 3]
F = interpolant('F', 'linear', grid, values)
def same(a, b): return abs(float(a)-b)<1e-8
pairs = [
(3.4,3-0.4*2),
(2.4,5-0.4*2),
(1.6,2+3*0.1/0.5),
(1.4,1+0.4/0.5),
(0.4,0.4),
(-.6,-0.6)
]
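# Explanatory note (added comment, not part of the original test): the expected
# values above follow from piecewise-linear interpolation on the grid
# [0, 1, 1.5, 2, 3] with values [0, 1, 2, 5, 3]. E.g. at x=2.4 the segment from
# (2, 5) to (3, 3) has slope -2, giving 5-0.4*2; points outside the grid
# extrapolate the end segments, so F(-0.6) == -0.6.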
X = MX.sym("x")
J = Function("F",[X],[F(X)])
for a,r in pairs:
self.assertTrue(same(F(a), r))
self.check_codegen(F,inputs=[a])
X = MX.sym("x")
J = Function("F",[X],[jacobian(F(X),X)])
pairs = [
(3.4,-2),
(2.4,-2),
(1.6,6),
(1.4,2),
(0.4,1),
(-.6,1),
(1,2),
(0.99,1),
]
for a,r in pairs:
self.assertTrue(same(J(a), r))
self.check_codegen(J,inputs=[a])
def test_2d_interpolant(self):
grid = [[0, 1, 4, 5],
[0, 2, 3]]
values = [0, 1, 8, 3,
10, -11, 12, 13,
20, 31, -42, 53]
F = interpolant('F', 'linear', grid, values)
a0 = -11+0.4*(31+11)
a1 = 12+0.4*(-42-12)
pairs = [
(vertcat(1,2), -11),
(vertcat(1,3), 31),
(vertcat(4,2), 12),
(vertcat(4,3), -42),
(vertcat(1,2.4), a0),
(vertcat(4,2.4), a1),
(vertcat(3,2), -11+2.0/3*(12+11)),
(vertcat(3,3), 31+2.0/3*(-42-31)),
(vertcat(3,2.4), a0+2.0/3*(a1-a0))
]
for a,r in pairs:
self.checkarray(F(a), r)
self.check_codegen(F,inputs=[a])
X = MX.sym("x",2)
J = Function("F",[X],[jacobian(F(X),X)])
jx0 = (12+11)/3.0
jx1 = (-42-31)/3.0
jx2 = (13-12)
jx3 = (53+42)
jy0 = 31+11
jy1 = -42-12
pairs = [
(vertcat(1,2), vertcat(jx0,jy0)),
(vertcat(1,3), vertcat(jx1,jy0)),
(vertcat(4,2), vertcat(jx2,jy1)),
(vertcat(4,3), vertcat(jx3,jy1)),
(vertcat(1,2.4), vertcat(jx0+(jx1-jx0)*0.4, 31+11)),
(vertcat(4,2.4), vertcat(jx2+(jx3-jx2)*0.4, -42-12)),
(vertcat(3,2), vertcat(jx0,jy0+(jy1-jy0)*2.0/3)),
(vertcat(3,3), vertcat(jx1,jy0+(jy1-jy0)*2.0/3)),
(vertcat(3,2.4), vertcat(jx0+(jx1-jx0)*0.4,jy0+(jy1-jy0)*2.0/3)),
]
for a,r in pairs:
self.checkarray(J(a).T, r)
self.check_codegen(J,inputs=[a])
def test_1d_interpolant_uniform(self):
grid = [[0, 1, 2]]
values = [0, 1, 2]
for opts in [{"lookup_mode": ["linear"]},{"lookup_mode": ["exact"]}]:
F = interpolant('F', 'linear', grid, values, opts)
def same(a, b): return abs(float(a)-b)<1e-8
self.assertTrue(same(F(2.4), 2.4))
self.assertTrue(same(F(1.4), 1.4))
self.assertTrue(same(F(0), 0))
self.assertTrue(same(F(1), 1))
self.assertTrue(same(F(2), 2))
self.assertTrue(same(F(6), 6))
self.assertTrue(same(F(0.4), 0.4))
self.assertTrue(same(F(-.6), -.6))
F = interpolant('F', 'linear', [np.linspace(0,1,7)], list(range(7)), {"lookup_mode": ["exact"]})
grid = [[2, 4, 6]]
values = [10, 7, 1]
for opts in [{"lookup_mode": ["linear"]},{"lookup_mode": ["exact"]}]:
F = interpolant('F', 'linear', grid, values, opts)
def same(a, b): return abs(float(a)-b)<1e-8
self.assertTrue(same(F(1), 11.5))
self.assertTrue(same(F(2), 10))
self.assertTrue(same(F(3), 8.5))
self.assertTrue(same(F(4), 7))
self.assertTrue(same(F(5), 4))
self.assertTrue(same(F(6), 1))
self.assertTrue(same(F(7), -2))
F = interpolant('F', 'linear', [np.linspace(0,1,7)], list(range(7)), {"lookup_mode": ["exact"]})
def test_2d_interpolant_uniform(self):
grid = [[0, 1, 2], [0, 1, 2]]
values = [0, 1, 2, 10, 11, 12, 20, 21, 22]
for opts in [{"lookup_mode": ["linear","linear"]},{"lookup_mode": ["exact","exact"]}]:
F = interpolant('F', 'linear', grid, values, opts)
def same(a, b): return abs(float(a)-b)<1e-8
self.assertTrue(same(F([2.4, 0.5]), 7.4))
self.assertTrue(same(F([1.4, 0.5]), 6.4))
self.assertTrue(same(F([0.4, 0.5]), 5.4))
self.assertTrue(same(F([1, 0.5]), 6))
self.assertTrue(same(F([1, 0]), 1))
self.assertTrue(same(F([0, 0]), 0))
self.assertTrue(same(F([0.4, 1]), 10.4))
self.assertTrue(same(F([-.6, 0.5]), 4.4))
self.assertTrue(same(F([-.6, 1.5]), 14.4))
self.assertTrue(same(F([-.6, 2.5]), 24.4))
self.assertTrue(same(F([-.6, 3.5]), 34.4))
@skip(not scipy_interpolate)
def test_2d_bspline(self):
import scipy.interpolate
np.random.seed(0)
d_knots = [list(np.linspace(0,1,5)),list(np.linspace(0,1,6))]
data = np.random.random([len(e) for e in d_knots])
r = np.meshgrid(*d_knots,indexing='ij')
xyz = np.vstack(e.ravel(order='F') for e in r).ravel(order='F')
d_flat = data.ravel(order='F')
LUT = casadi.interpolant('name','bspline',d_knots,d_flat)
LUTJ = LUT.jacobian_old(0, 0)
LUTH = LUT.hessian_old(0, 0)
self.check_codegen(LUT, [vertcat(0.2,0.3)])
#scipy.interpolate.interpn(d_knots, data, [0.2,0.3], method='splinef2d')
interp = scipy.interpolate.RectBivariateSpline(d_knots[0], d_knots[1], data)
for x in [0,0.01,0.1,0.2,0.9,0.99,1]:
for y in [0,0.01,0.1,0.2,0.9,0.99,1]:
m = LUT([x,y])
r = interp.ev(x,y)
self.checkarray(m,r)
m = LUTJ([x,y])[0]
try:
r = [interp.ev(x,y, 1, 0), interp.ev(x,y, 0, 1)]
except:
r = None
if r is not None:
self.checkarray(m,r)
m = LUTH([x,y])[0]
try:
r = blockcat([[interp.ev(x,y, 2, 0),interp.ev(x,y, 1, 1)],[interp.ev(x,y, 1, 1), interp.ev(x,y, 0, 2)]])
except:
r = None
if r is not None:
self.checkarray(m,r)
@skip(not scipy_interpolate)
def test_1d_bspline(self):
import scipy.interpolate
np.random.seed(0)
d_knots = [list(np.linspace(0,1,5))]
data = np.random.random([len(e) for e in d_knots])
r = np.array(d_knots)
xyz = np.vstack(e.ravel(order='F') for e in r).ravel(order='F')
d_flat = data.ravel(order='F')
LUT = casadi.interpolant('name','bspline',d_knots,d_flat)
self.check_codegen(LUT, [0.2])
LUTJ = LUT.jacobian_old(0, 0)
LUTH = LUT.hessian_old(0, 0)
interp = scipy.interpolate.InterpolatedUnivariateSpline(d_knots[0], data)
for x in [0,0.01,0.1,0.2,0.9,0.99,1]:
m = LUT(x)
r = interp(x)
self.checkarray(m,r)
m = LUTJ(x)[0]
try:
r = interp(x, 1)
except:
r = None
if r is not None:
self.checkarray(m,r)
m = LUTH(x)[0]
try:
r = interp(x, 2)
except:
r = None
if r is not None:
self.checkarray(m,r)
def test_Callback_Jacobian(self):
x = MX.sym("x")
y = MX.sym("y")
num_inputs = [0.2,0.7]
g = Function("g", [x,y],[sin(x+3*y)])
class Fun(Callback):
# sin(x+3*y)
def __init__(self):
Callback.__init__(self)
self.construct("Fun", {})
def get_n_in(self): return 2
def get_n_out(self): return 1
def eval(self,arg):
x = arg[0]
y = arg[1]
z0 = 3*y
z1 = x+z0
z2 = sin(z1)
return [z2]
def get_n_forward(self): return 0
def get_n_reverse(self): return 0
def has_jacobian(self): return True
def get_jacobian(self, name, opts):
x = SX.sym("x")
y = SX.sym("y")
J = Function(name, [x,y],[horzcat(cos(x+3*y),3*cos(x+3*y))], opts)
return J
f = Fun()
self.checkfunction(f,g,inputs=num_inputs,fwd=False,adj=False,indirect=False)
def test_Callback_errors(self):
class Fun(Callback):
def __init__(self):
Callback.__init__(self)
self.construct("Fun", {})
def get_n_in(self): return 2
def get_n_out(self): return 1
def get_sparsity_in(i):
return 4
def eval(self,arg):
x = arg[0]
y = arg[1]
z0 = 3*y
z1 = x+z0
z2 = sin(z1)
return [z2]
try:
f = Fun()
except Exception as e:
s = str(e)
print(s)
self.assertTrue("get_sparsity_in" in s)
def test_Callback(self):
x = MX.sym("x")
y = MX.sym("y")
num_inputs = [0.2,0.7]
g = Function("g", [x,y],[sin(x+3*y)])
# Simple syntax
def getP(indirect=True):
class Fun(Callback):
def __init__(self):
Callback.__init__(self)
self.construct("Fun", {})
def get_n_in(self): return 2
def get_n_out(self): return 1
def eval(self,arg):
x = arg[0]
y = arg[1]
z0 = 3*y
z1 = x+z0
z2 = sin(z1)
return [z2]
f = Fun()
f.__disown__()
if not indirect:
return f
f = Function("f", [x,y],[f(x,y)])
return f
for indirect in [True,False]:
f = getP(indirect=indirect)
self.checkfunction(f,g,inputs=num_inputs,sens_der=False,jacobian=False,gradient=False,hessian=False,evals=False)
with self.assertRaises(Exception):
f.gradient()
with self.assertRaises(Exception):
f.jacobian_old(0, 0)
with self.assertRaises(Exception):
f.forward(1)
with self.assertRaises(Exception):
f.reverse(1)
def test_Callback_dimcheck(self):
class Fun(Callback):
def __init__(self):
Callback.__init__(self)
self.construct("Fun")
def get_n_in(self): return 2
def get_n_out(self): return 1
def eval(self,arg):
return [2, 1]
f = Fun()
s = ""
try:
f(2)
except Exception as e:
s = str(e)
self.assertTrue("Incorrect number of inputs" in s)
class Fun(Callback):
def __init__(self):
Callback.__init__(self)
self.construct("Fun")
def get_n_in(self): return 2
def get_n_out(self): return 1
def eval(self,arg):
return [2, 1]
f = Fun()
s = ""
try:
f(2,3)
except Exception as e:
s = str(e)
self.assertTrue("Callback::eval" in s)
s = ""
class Fun(Callback):
def __init__(self):
Callback.__init__(self)
self.construct("Fun")
def get_n_in(self): return 2
def get_n_out(self): return 1
def eval(self,arg):
return [DM.zeros(2,2)]
f = Fun()
try:
f(2,3)
except Exception as e:
s = str(e)
self.assertTrue("Callback::eval" in s)
def test_Callback_sens(self):
x = MX.sym("x")
y = MX.sym("y")
num_inputs = [0.2,0.7]
g = Function("g", [x,y],[sin(x+3*y)])
def getP(has_fwd=True,has_adj=True,indirect=True):
class Fun(Callback):
# sin(x+3*y)
def __init__(self,opts):
Callback.__init__(self)
self.construct("Fun", opts)
def get_n_in(self): return 2
def get_n_out(self): return 1
def eval(self,arg):
x = arg[0]
y = arg[1]
z0 = 3*y
z1 = x+z0
z2 = sin(z1)
return [z2]
if has_fwd:
def get_n_forward(self): return 1
def get_forward(self,name,nfwd,inames,onames,opts):
assert(nfwd==1)
class ForwardFun(Callback):
# sin(x+3*y)
def __init__(self):
Callback.__init__(self)
self.construct(name, {"verbose":True})
def get_n_in(self): return 2+1+2
def get_n_out(self): return 1
def eval(self,arg):
x,y = arg[0],arg[1]
z = arg[2]
seeds = arg[3:]
z0 = 3*y
z1 = x+z0
z2 = sin(z1)
ret = []
for i in range(3,len(arg),2):
dx = arg[i]
dy = arg[i+1]
dz0 = 3*dy
dz1 = dx+dz0
dz2 = cos(z1)*dz1
ret.append(dz2)
return ret
ffun = ForwardFun()
ffun.__disown__()
return ffun
if has_adj:
def get_n_reverse(self): return 1
def get_reverse(self,name,nadj,inames,onames,opts):
assert(nadj==1)
class BackwardFun(Callback):
# sin(x+3*y)
def __init__(self):
Callback.__init__(self)
self.construct(name, {"verbose":True})
def get_n_in(self): return 2+1+1
def get_n_out(self): return 2
def eval(self,arg):
x,y = arg[0],arg[1]
z = arg[2]
seeds = arg[3:]
z0 = 3*y
z1 = x+z0
z2 = sin(z1)
ret = []
for i in range(3,len(arg)):
z_bar = arg[i]
bx = 0
by = 0
bz1 = 0
bz0 = 0
bz2 = z_bar
bz1 += bz2*cos(z1)
bx+= bz1;bz0+= bz1
by+= 3*bz0
ret.append(bx)
ret.append(by)
return ret
bfun = BackwardFun()
bfun.__disown__()
return bfun
opts = {"verbose":True}
f = Fun(opts)
f.__disown__()
if not indirect:
return f
f = Function("f", [x,y],[f(x,y)])
return f
for indirect in [True,False]:
f = getP(has_fwd=True,has_adj=True,indirect=indirect)
self.checkfunction(f,g,inputs=num_inputs,sens_der=False,hessian=False,evals=1)
f = getP(has_fwd=True,has_adj=False,indirect=indirect)
self.checkfunction(f,g,inputs=num_inputs,sens_der=False,hessian=False,adj=False,evals=1)
f = getP(has_fwd=False,has_adj=True,indirect=indirect)
self.checkfunction(f,g,inputs=num_inputs,sens_der=False,hessian=False,fwd=False,evals=1)
f = getP(has_fwd=True,has_adj=False,indirect=indirect)
self.checkfunction(f,g,inputs=num_inputs,sens_der=False,hessian=False,adj=False,evals=1)
f = getP(has_fwd=False,has_adj=True,indirect=indirect)
self.checkfunction(f,g,inputs=num_inputs,sens_der=False,hessian=False,fwd=False,evals=1)
@requires_nlpsol("ipopt")
def test_common_specific_options(self):
x = SX.sym("x")
nlp = {"x": x, "f": x**2}
with capture() as out:
solver = nlpsol("solver","ipopt",nlp)
self.assertTrue("nlp_f" not in out[0])
with capture() as out:
solver = nlpsol("solver","ipopt",nlp,{"common_options":{"verbose":True}})
self.assertTrue("nlp_f" in out[0])
with capture() as out:
solver = nlpsol("solver","ipopt",nlp,{"specific_options":{ "nlp_f" : {"verbose":True}}})
self.assertTrue("nlp_f" in out[0])
with capture() as out:
solver = nlpsol("solver","ipopt",nlp,{"common_options":{"verbose":True},"specific_options":{ "nlp_f" : {"verbose":False}}})
self.assertTrue("nlp_f" not in out[0])
with capture() as out:
solver = nlpsol("solver","ipopt",nlp,{"common_options":{"verbose":False},"specific_options":{ "nlp_f" : {"verbose":True}}})
self.assertTrue("nlp_f" in out[0])
with capture() as out:
solver = nlpsol("solver","ipopt",nlp)
self.assertTrue(len(out[1])==0)
with capture() as out:
solver = nlpsol("solver","ipopt",nlp,{"specific_options":{ "nlp_foo" : {"verbose":True}}})
self.assertTrue("Ignoring" + out[1])
self.assertTrue("nlp_g" in out[1])
with self.assertRaises(Exception):
solver = nlpsol("solver","ipopt",nlp,{"specific_options":{ "nlp_foo" : 3}})
@requires_expm("slicot")
@memory_heavy()
def test_expm(self):
eps = 1e-6
t = MX.sym('t')
tnum = 0.2
n = 4
np.random.seed(0)
Anum = np.random.random((n,n))
Bnum = np.random.random((n,2))
Bb = np.random.random((n,2))
dA = np.random.random((n,n))
Yb = np.random.random((n,2))
def expm(A):
n = A.shape[0]
x = MX.sym('x',n)
As = MX.sym('A',n,n)
dae = {'x':x,'p':vec(As),'ode':mtimes(As,x)}
intg = integrator('intg','cvodes',dae,{'reltol':1e-14,'abstol':1e-14})
Intg = intg.map('identity','serial',n,[1],[])
out = Intg(x0=DM.eye(n),p=vec(As))
expmF = Function('expm',[As],[out["xf"]])
return expmF(A)
A = MX.sym("A",n,n)
t = MX.sym("t")
fr = Function('fr',[A,t],[expm(A*t)])
f = Function('f',[A,t],[casadi.expm(A*t)])
self.checkfunction(fr,f,inputs=[Anum, 1.1],digits=8)
fr = Function('fr',[t],[expm(Anum*t)])
f = Function('f',[t],[casadi.expm_const(Anum,t)])
self.checkfunction(fr,f,inputs=[1.1],digits=8)
JA = jacobian(casadi.expm(A*t),A)
Jt = jacobian(casadi.expm(A*t),t)
self.assertTrue(JA.nnz()==n**4)
self.assertTrue(Jt.nnz()==n**2)
JA = jacobian(casadi.expm_const(A,t),A)
Jt = jacobian(casadi.expm_const(A,t),t)
self.assertTrue(JA.nnz()==0)
self.assertTrue(Jt.nnz()==n**2)
def test_conditional(self):
np.random.seed(5)
x = MX.sym('x',2,2)
y = MX.sym('y',2,2)
sp1 = MX.sym('y',Sparsity.lower(2))
sp2 = MX.sym('z',Sparsity.diag(2))
f1 = Function("f",[sp2,x],[x**2,x*sp2])
f2 = Function("f",[sp1,x],[2*x**2,sin(sp1)])
f3 = Function("f",[sp1,sp2],[sp1*sp2,sp1+sp2])
F = Function.conditional("test",[f1,f2], f3)
Fsx = F.expand()
A = np.random.random((2,2))
B = np.random.random((2,2))
for i in range(-1,3):
self.checkfunction(F,Fsx,inputs = [i,A,B])
self.check_codegen(F,inputs=[i,A,B])
if __name__ == '__main__':
unittest.main()
|
lgpl-3.0
|
jamesrobertlloyd/gpss-research
|
source/utils/misc.py
|
4
|
7881
|
import collections
import itertools
import numpy as np
nax = np.newaxis
# import Image
import mkl_hack
import scipy.linalg
import scipy.stats
import random
def set_all_random_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
def sample_truncated_normal(loc=0, scale=1, min_value=-np.Inf):
'''Uses inverse cdf method - actually uses survival function sf = 1-cdf'''
return scipy.stats.norm.isf(np.random.rand() * scipy.stats.norm.sf(min_value, loc=loc, scale=scale), loc=loc, scale=scale)
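# Illustrative note (added comment, not original code): the inverse-survival
# trick above draws u ~ U(0, 1) and solves sf(x) = u * sf(min_value), which is
# the inverse-CDF method restricted to the tail [min_value, inf). For example,
# sample_truncated_normal(loc=0, scale=1, min_value=2.0) always returns >= 2.0.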
def min_abs_diff(x):
'''Minimum absolute difference between all pairs in an iterable'''
return min([abs(i - j) if (i != j) else np.Inf for i in x for j in x])
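# Example (added comment, not original code): min_abs_diff([1, 4, 6]) == 2,
# because |4 - 6| is the smallest gap between distinct values; pairs of equal
# values (including each element with itself) are mapped to np.Inf and ignored.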
def _err_string(arr1, arr2):
try:
if np.allclose(arr1, arr2):
return 'OK'
elif arr1.shape == arr2.shape:
return 'off by %s' % np.abs(arr1 - arr2).max()
else:
return 'incorrect shapes: %s and %s' % (arr1.shape, arr2.shape)
except:
return 'error comparing'
err_info = collections.defaultdict(list)
def set_err_info(key, info):
err_info[key] = info
def summarize_error(key):
"""Print a helpful description of the reason a condition was not satisfied. Intended usage:
assert pot1.allclose(pot2), summarize_error()"""
if type(err_info[key]) == str:
return ' ' + err_info[key]
else:
return '\n' + '\n'.join([' %s: %s' % (name, err) for name, err in err_info[key]]) + '\n'
def broadcast(idx, shape):
result = []
for i, d in zip(idx, shape):
if d == 1:
result.append(0)
else:
result.append(i)
return tuple(result)
def full_shape(shapes):
"""The shape of the full array that results from broadcasting the arrays of the given shapes."""
#return tuple(np.array(shapes).max(0))
temp = np.array(shapes)
temp1 = np.where((temp==0).any(0), 0, temp.max(0))
return tuple(temp1)
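# Example (added comment, not original code): full_shape([(3, 1), (1, 4)]) is
# (3, 4), the NumPy broadcast shape; a 0 along an axis in any input forces a 0
# in the result for that axis.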
def array_map(fn, arrs, n):
"""Takes a list of arrays a_1, ..., a_n where the elements of the first n dimensions line up. For every possible
index into the first n dimensions, apply fn to the corresponding slices, and combine the results into
an n-dimensional array. Supports broadcasting but does not prepend 1's to the shapes."""
# we shouldn't need a special case for n == 0, but NumPy complains about indexing into a zero-dimensional
# array a using a[(Ellipsis,)].
if n == 0:
return fn(*arrs)
full_shape = tuple(np.array([a.shape[:n] for a in arrs]).max(0))
result = None
for full_idx in itertools.product(*map(range, full_shape)):
inputs = [a[broadcast(full_idx, a.shape[:n]) + (Ellipsis,)] for a in arrs]
curr = fn(*inputs)
if result is None:
if type(curr) == tuple:
result = tuple(np.zeros(full_shape + np.asarray(c).shape) for c in curr)
else:
result = np.zeros(full_shape + np.asarray(curr).shape)
if type(curr) == tuple:
for i, c in enumerate(curr):
result[i][full_idx + (Ellipsis,)] = c
else:
result[full_idx + (Ellipsis,)] = curr
return result
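# Usage sketch (added comment, not original code): apply np.linalg.inv across a
# batch of matrices while preserving the single leading batch dimension:
#   batch = np.random.rand(5, 3, 3)
#   invs = array_map(np.linalg.inv, [batch], 1)   # invs.shape == (5, 3, 3)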
def extend_slice(slc, n):
if not isinstance(slc, tuple):
slc = (slc,)
#if any([isinstance(s, np.ndarray) for s in slc]):
# raise NotImplementedError('Advanced slicing not implemented yet')
return slc + (slice(None),) * n
def process_slice(slc, shape, n):
"""Takes a slice and returns the appropriate slice into an array that's being broadcast (i.e. by
converting the appropriate entries to 0's and :'s."""
if not isinstance(slc, tuple):
slc = (slc,)
slc = list(slc)
ndim = len(shape) - n
assert ndim >= 0
shape_idx = 0
for slice_idx, s in enumerate(slc):
if s == nax:
continue
if shape[shape_idx] == 1:
if type(s) == int:
slc[slice_idx] = 0
else:
slc[slice_idx] = slice(None)
shape_idx += 1
if shape_idx != ndim:
raise IndexError('Must have %d terms in the slice object' % ndim)
return extend_slice(tuple(slc), n)
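# Example (added comment, not original code): for an array broadcast along its
# first axis, shape (1, 4) with n=0, process_slice((2, slice(None)), (1, 4), 0)
# returns (0, slice(None)): the index into the length-1 axis collapses to 0.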
def my_sum(a, axis, count):
"""For an array a which might be broadcast, return the value of a.sum() were a to be expanded out in full."""
if a.shape[axis] == count:
return a.sum(axis)
elif a.shape[axis] == 1:
return count * a.sum(axis)
else:
raise IndexError('Cannot be broadcast: a.shape=%s, axis=%d, count=%d' % (a.shape, axis, count))
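# Example (added comment, not original code): if a has shape (1, 4) but stands
# in for a broadcast (3, 4) array, my_sum(a, axis=0, count=3) returns
# 3 * a.sum(0), matching what the fully expanded array would have summed to.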
def match_shapes(arrs):
"""Prepend 1's to the shapes so that the dimensions line up."""
#temp = [(name, np.asarray(a), deg) for name, a, deg in arrs]
#ndim = max([a.ndim - deg for _, a, deg in arrs])
temp = [a for name, a, deg in arrs]
for i in range(len(temp)):
if np.isscalar(temp[i]):
temp[i] = np.array(temp[i])
ndim = max([a.ndim - deg for a, (_, _, deg) in zip(temp, arrs)])
prep_arrs = []
for name, a, deg in arrs:
if np.isscalar(a):
a = np.asarray(a)
if a.ndim < deg:
raise RuntimeError('%s.ndim must be at least %d' % (name, deg))
if a.ndim < ndim + deg:
#a = a.reshape((1,) * (ndim + deg - a.ndim) + a.shape)
slc = (nax,) * (ndim + deg - a.ndim) + (Ellipsis,)
a = a[slc]
prep_arrs.append(a)
return prep_arrs
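# Example (added comment, not original code): with A of shape (3, 4) and b of
# shape (4,), both declared with deg=0,
#   match_shapes([('A', A, 0), ('b', b, 0)])
# returns [A, b[np.newaxis, :]] so the two arrays broadcast against each other.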
def lstsq(A, b):
# do this rather than call lstsq to support efficient broadcasting
P = array_map(np.linalg.pinv, [A], A.ndim - 2)
return array_map(np.dot, [P, b], A.ndim - 2)
def dot(A, b):
return array_map(np.dot, [A, b], A.ndim - 2)
def vdot(x, y):
return (x*y).sum(-1)
def my_inv(A):
"""Compute the inverse of a symmetric positive definite matrix."""
cho = scipy.linalg.flapack.dpotrf(A)
choinv = scipy.linalg.flapack.dtrtri(cho[0])
upper = scipy.linalg.flapack.dlauum(choinv[0])[0]
# upper is the upper triangular entries of A^{-1}, so need to fill in the
# lower triangular ones; unfortunately this has nontrivial overhead
temp = np.diag(upper)
return upper + upper.T - np.diag(temp)
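# Sketch of the computation above (added comment, not original code): dpotrf
# gives the upper Cholesky factor U with A = U^T U, dtrtri inverts U, and
# dlauum forms U^{-1} U^{-T}, i.e. the upper triangle of A^{-1}; the last two
# lines mirror it into the lower triangle. On recent SciPy the same routines
# are available through the public scipy.linalg.lapack interface;
# scipy.linalg.flapack is the legacy low-level name used here.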
def transp(A):
return A.swapaxes(-2, -1)
# def resize(arr, size):
# assert arr.ndim in [2, 3]
# if arr.ndim == 3:
# #return np.concatenate([shape_to_cons('**1', resize(arr[:,:,i], size))
# # for i in range(3)], axis=2)
# ans = np.concatenate([resize(arr[:,:,i], size)[:,:,nax] for i in range(3)], axis=2)
# return ans
# M, N = arr.shape
# assert arr.dtype in ['float64', 'float32']
# dtype = arr.dtype
# m, n = size
# if m is None:
# assert n is not None
# m = int(M * (float(n)/float(N)))
# if n is None:
# assert m is not None
# n = int(N * (float(m)/float(M)))
# result = np.array(Image.fromarray(arr.astype('float32'), 'F').resize((n, m), Image.ANTIALIAS),
# dtype=dtype)
# return result
# Pretty printing
try:
import termcolor
has_termcolor = True
except:
has_termcolor = False
try:
import config
color_scheme = config.COLOR_SCHEME
except:
color_scheme = 'dark'
def paren_colors():
if color_scheme == 'dark':
return ['red', 'green', 'cyan', 'magenta', 'yellow']
elif color_scheme == 'light':
return ['blue', 'red', 'magenta', 'green', 'cyan']
else:
raise RuntimeError('Unknown color scheme: %s' % color_scheme)
def colored(text, depth):
if has_termcolor:
colors = paren_colors()
color = colors[depth % len(colors)]
return termcolor.colored(text, color, attrs=['bold'])
else:
return text
def format_if_possible(format, value):
try:
return format % value
except:
return '%s' % value
|
mit
|
unicef/un-partner-portal
|
backend/unpp_api/apps/project/exports/pdf/cfei.py
|
1
|
10729
|
import hashlib
import os
import tempfile
from urllib.parse import quote
from collections import defaultdict
from babel.dates import get_timezone, format_datetime, format_date
from django.http import HttpResponse
from django.utils import timezone
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, ListFlowable, ListItem, Image
from common.consts import SELECTION_CRITERIA_CHOICES
from common.mapping import render_point_to_image_file
from common.models import Point
from project.models import EOI, EOIAttachment
CRITERIA_DISPLAY_DICT = dict(SELECTION_CRITERIA_CHOICES)
IMAGE_WIDTH = 450
class CFEIPDFExporter:
def __init__(self, cfei, timezone_name='UTC'):
self.cfei: EOI = cfei
self.tzinfo = get_timezone(timezone_name)
filename = hashlib.sha256(str(cfei.id).encode()).hexdigest()
self.file_path = os.path.join(tempfile.gettempdir(), filename + '.pdf')
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER))
styles.add(ParagraphStyle(name='SmallRight', alignment=TA_CENTER))
self.style_center = styles["Center"]
self.style_normal = styles["Normal"]
self.style_right = styles["SmallRight"]
self.style_h1 = styles["Heading1"]
self.style_h3 = styles["Heading3"]
self.style_h4 = styles["Heading4"]
self.style_h1.alignment = TA_CENTER
self.style_h3.alignment = TA_CENTER
self.style_right.alignment = TA_RIGHT
self.style_right.fontSize = 8
self.margin = 24
def get_timeline_table(self):
if self.cfei.is_open:
table_rows = [
[
'Posted',
format_date(self.cfei.published_timestamp),
],
[
'Clarification Request Deadline',
format_date(self.cfei.clarification_request_deadline_date),
],
[
'Application Deadline',
format_date(self.cfei.deadline_date),
],
[
'Notification of Results',
format_date(self.cfei.notif_results_date),
],
[
'Start Date',
format_date(self.cfei.start_date),
],
[
'End Date',
format_date(self.cfei.end_date),
],
]
else:
table_rows = [
[
'Posted',
format_date(self.cfei.published_timestamp),
],
[
'Start Date',
format_date(self.cfei.start_date),
],
[
'End Date',
format_date(self.cfei.end_date),
],
]
table = Table(table_rows, colWidths='*')
table.setStyle(TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (0, -1), colors.darkgrey),
('TEXTCOLOR', (0, 0), (0, -1), colors.white),
]))
return table
def get_selection_criteria_table(self):
table_rows = [
['Name', 'Description', 'Weight'],
]
for criteria in self.cfei.assessments_criteria:
table_rows.append([
CRITERIA_DISPLAY_DICT[criteria['selection_criteria']],
Paragraph(criteria.get('description', ''), style=self.style_normal),
Paragraph(str(criteria.get('weight', 'N/A')), style=self.style_normal),
])
table = Table(table_rows, colWidths=['45%', '45%', '*'])
table.setStyle(TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.darkgrey),
('TEXTCOLOR', (0, 0), (-1, 0), colors.white),
]))
return table
def get_specializations_grouped_by_sector(self):
grouped = defaultdict(list)
for specialization in self.cfei.specializations.all():
grouped[specialization.category.name].append(specialization.name)
paragraph = ListFlowable(([
*[
Paragraph(sector_name, style=self.style_normal),
ListFlowable([
Paragraph(spec, style=self.style_normal) for spec in sorted(grouped[sector_name])
], bulletType='a'),
Spacer(1, self.margin / 3),
]
] for sector_name in sorted(grouped.keys())), bulletType='A')
return paragraph
def get_locations(self):
grouped = defaultdict(list)
point: Point
for point in self.cfei.locations.all():
grouped[point.admin_level_1.country_name].append(point)
paragraphs = []
for country_name, point_list in grouped.items():
subitems = []
for point in point_list:
rendered_point_filename = render_point_to_image_file(
point,
height=IMAGE_WIDTH // 2,
width=IMAGE_WIDTH
)
point_paragraphs = [
Paragraph(point.admin_level_1.name, style=self.style_normal),
]
if rendered_point_filename:
point_paragraphs.extend((
Spacer(1, self.margin / 4),
Image(rendered_point_filename),
))
subitems.append(point_paragraphs)
paragraphs.append([
Paragraph(country_name, style=self.style_normal),
ListFlowable(subitems, bulletType='a'),
Spacer(1, self.margin / 3),
])
return ListFlowable(paragraphs, bulletType='A')
def get_attachments_table(self):
table_rows = [
['Description', 'URL'],
]
attachment: EOIAttachment
for attachment in self.cfei.attachments.all():
table_rows.append([
Paragraph(attachment.description, style=self.style_normal),
Paragraph(attachment.file.file_field.url, style=self.style_normal),
])
table = Table(table_rows, colWidths='*')
table.setStyle(TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.darkgrey),
('TEXTCOLOR', (0, 0), (-1, 0), colors.white),
]))
return table
def generate(self):
document = SimpleDocTemplate(
self.file_path,
title=self.cfei.title,
rightMargin=self.margin,
leftMargin=self.margin,
topMargin=self.margin,
bottomMargin=self.margin
)
paragraphs = []
timestamp = timezone.now()
paragraphs.append(Paragraph(
format_datetime(timestamp, 'medium', tzinfo=self.tzinfo), self.style_right
))
if self.cfei.is_open:
header = 'Call for Expression of Interest'
else:
header = 'Direct Selection / Retention'
paragraphs.append(Paragraph(header, self.style_h3))
paragraphs.append(Paragraph(self.cfei.title, self.style_h1))
paragraphs.append(Spacer(1, self.margin))
main_content = [
ListItem([
Paragraph('Timeline', style=self.style_h4),
self.get_timeline_table(),
Spacer(1, self.margin / 2)
]),
ListItem([
Paragraph('Locations', style=self.style_h4),
self.get_locations(),
Spacer(1, self.margin / 2)
]),
ListItem([
Paragraph('Sector(s) and area(s) of specialization', style=self.style_h4),
self.get_specializations_grouped_by_sector(),
Spacer(1, self.margin / 2)
]),
ListItem([
Paragraph('Issuing Agency', style=self.style_h4),
Paragraph(self.cfei.agency.name, style=self.style_normal),
Spacer(1, self.margin / 2)
]),
ListItem([
Paragraph('Project Background', style=self.style_h4),
Paragraph(self.cfei.description, style=self.style_normal),
Spacer(1, self.margin / 2)
]),
ListItem([
Paragraph('Expected Results', style=self.style_h4),
Paragraph(self.cfei.goal or '-', style=self.style_normal),
Spacer(1, self.margin / 2)
]),
ListItem([
Paragraph('Other Information', style=self.style_h4),
Paragraph(self.cfei.other_information or '-', style=self.style_normal),
Spacer(1, self.margin / 2)
]),
]
if self.cfei.is_open:
main_content.append(ListItem([
Paragraph('Selection Criteria', style=self.style_h4),
self.get_selection_criteria_table(),
Spacer(1, self.margin / 2)
]))
if self.cfei.attachments.exists():
main_content.append(ListItem([
Paragraph('Attachments', style=self.style_h4),
self.get_attachments_table(),
Spacer(1, self.margin / 2)
]))
if self.cfei.is_open:
cn_template = self.cfei.agency.profile.eoi_template
main_content.append(ListItem([
Paragraph('Concept Note Template', style=self.style_h4),
Paragraph(cn_template.url if cn_template else '-', style=self.style_normal),
]))
paragraphs.append(ListFlowable(main_content))
document.build(paragraphs)
def get_as_response(self):
self.generate()
response = HttpResponse()
response.content_type = 'application/pdf'
with open(self.file_path, 'rb') as content:
response.write(content.read())
self.cleanup()
response['Content-Disposition'] = 'attachment; filename="{}"'.format(f'{quote(self.cfei.title)}.pdf')
return response
def cleanup(self):
os.remove(self.file_path)
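# Usage sketch (added comment, not part of the original module); `eoi_id` and
# the surrounding view are assumed, e.g. inside a Django view function:
#
#   def download_cfei_pdf(request, eoi_id):
#       eoi = EOI.objects.get(pk=eoi_id)
#       return CFEIPDFExporter(eoi, timezone_name='UTC').get_as_response()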
|
apache-2.0
|
BladeSmithJohn/nixysa
|
nixysa/nullable_binding.py
|
9
|
24217
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""nullable binding model module.
This module implements the glue functions for the nullable binding model,
used for nullable types.
In C++, objects using this binding model are represented by a pointer.
For JS bindings, the nullable type is represented by a JavaScript reference.
"""
import by_pointer_binding
import string
class InvalidNullableUsage(Exception):
"""Raised when a nullable is used in conjuction with a type that is not a
pointer pointer binding."""
pass
def JavaMemberString(scope, type_defn):
"""Gets the representation of a member name in Java.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a string representing the type
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
return data_type_bm.JavaMemberString(scope, data_type)
def CppTypedefString(scope, type_defn):
"""Gets the representation of a type when used in a C++ typedef.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
return data_type_bm.CppTypedefString(scope, data_type)
def CppMemberString(scope, type_defn):
"""Gets the representation of a type when used as a C++ class member.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
return data_type_bm.CppMemberString(scope, data_type)
def CppReturnValueString(scope, type_defn):
"""Gets the representation of a type when used as a C++ function return value.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
return data_type_bm.CppReturnValueString(scope, data_type)
def CppParameterString(scope, type_defn):
"""Gets the representation of a type when used for a function parameter.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
return data_type_bm.CppParameterString(scope, data_type)
def CppMutableParameterString(scope, type_defn):
"""Gets the representation of a type for a mutable function parameter.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the string representing
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
return data_type_bm.CppMutableParameterString(scope, data_type)
def CppMutableToNonMutable(scope, type_defn, expr):
"""Gets the string converting a mutable expression to a non-mutable one.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
expr: a string for the mutable expression.
Returns:
a string, which is the non-mutable expression.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
return data_type_bm.CppMutableToNonMutable(scope, data_type, expr)
def CppBaseClassString(scope, type_defn):
"""Gets the representation of a type for a base class.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the string representing
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def CppCallMethod(scope, type_defn, object_expr, mutable, method, param_exprs):
"""Gets the representation of a member function call.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object being called.
object_expr: a string, which is the expression for the object being called.
mutable: a boolean, whether or not the 'object_expr' expression is mutable
or not
method: a Function, representing the function to call.
param_exprs: a list of strings, each being the expression for the value of
each parameter.
Returns:
a string, which is the expression for the function call.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def CppCallStaticMethod(scope, type_defn, method, param_exprs):
"""Gets the representation of a static function call.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object being called.
method: a Function, representing the function to call.
param_exprs: a list of strings, each being the expression for the value of
each parameter.
Returns:
a string, which is the expression for the function call.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def CppCallConstructor(scope, type_defn, method, param_exprs):
"""Gets the representation of a constructor call.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object being called.
method: a Function, representing the constructor to call.
param_exprs: a list of strings, each being the expression for the value of
each parameter.
Returns:
a string, which is the expression for the constructor call.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def CppSetField(scope, type_defn, object_expr, field, param_expr):
"""Gets the representation of an expression setting a field in an object.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being set.
object_expr: a string, which is the expression for the object containing
the field being set.
field: a string, the name of the field to be set.
param_expr: a string, being the expression for the value to be set.
Returns:
a string, which is the expression for setting the field.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def CppGetField(scope, type_defn, object_expr, field):
"""Gets the representation of an expression getting a field in an object.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being retrieved.
object_expr: a string, which is the expression for the object containing
the field being retrieved.
field: a string, the name of the field to be retrieved.
Returns:
a string, which is the expression for getting the field.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def CppSetStatic(scope, type_defn, field, param_expr):
"""Gets the representation of an expression setting a static field.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being set.
field: a string, the name of the field to be set.
param_expr: a string, being the expression for the value to be set.
Returns:
a string, which is the expression for setting the field.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def CppGetStatic(scope, type_defn, field):
"""Gets the representation of an expression getting a static field.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being retrieved.
field: a string, the name of the field to be retrieved.
Returns:
a string, which is the expression for getting the field.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def JSDocTypeString(type_defn):
"""Gets the representation of a type in JSDoc notation.
Args:
type_defn: a Definition for the type.
Returns:
a string that is the JSDoc notation of type_defn.
"""
type_defn = type_defn.GetFinalType()
element_type_defn = type_defn.data_type.GetFinalType()
type = element_type_defn.binding_model.JSDocTypeString(element_type_defn)
if type[0] == '!':
type = type[1:]
return type
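# Example (added comment, not original code): if the element type's own JSDoc
# string is '!Foo' (a hypothetical non-null object type), JSDocTypeString
# returns 'Foo': the leading '!' is stripped because a nullable value may
# legitimately be null.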
def NpapiBindingGlueHeader(scope, type_defn):
"""Gets the NPAPI glue header for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue header.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def NpapiBindingGlueCpp(scope, type_defn):
"""Gets the NPAPI glue implementation for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue implementation.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def NpapiDispatchFunctionHeader(scope, type_defn, variable, npp, success):
"""Gets a header for NPAPI glue dispatch functions.
This function creates a string containing a C++ code snippet that should be
included at the beginning of NPAPI glue dispatch functions like Invoke or
GetProperty. This code snippet will declare and initialize certain variables
that will be used in the dispatch functions, like the NPObject representing
the object, or a pointer to the NPP instance.
First it checks whether the NPVariant is null. If so it simply sets the value
to null. It relies on the later compilation of the glue to detect when it is
used with a binding model that cannot be used with the value null. It is
binding model independent.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
variable: a string, representing a name of a variable that can be used to
store a reference to the object.
npp: a string, representing the name of the variable that holds the pointer
to the NPP instance. Will be declared by the code snippet.
success: the name of a bool variable containing the current success status.
(is not declared by the code snippet).
Returns:
a (string, string) pair, the first string being the code snippet, and the
second string being an expression to access the object.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
_from_npvariant_template = string.Template("""
${Type} ${variable};
if (NPVARIANT_IS_NULL(${input})) {
${variable} = NULL;
} else {
${text}
${variable} = ${value};
}
""")
def NpapiFromNPVariant(scope, type_defn, input_expr, variable, success,
exception_context, npp):
"""Gets the string to get a value from a NPVariant.
This function creates a string containing a C++ code snippet that is used to
retrieve a value from a NPVariant. If an error occurs, like if the NPVariant
is not of the correct type, the snippet will set the success status variable
to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
input_expr: an expression representing the NPVariant to get the value from.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
success: the name of a bool variable containing the current success status.
exception_context: the name of a string containing context information, for
use in exception reporting.
npp: a string, representing the name of the variable that holds the pointer
to the NPP instance.
Returns:
a (string, string) pair, the first string being the code snippet and the
second one being the expression to access that value.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
text, value = data_type_bm.NpapiFromNPVariant(
scope,
data_type,
input_expr,
variable + '_nullable',
success,
exception_context,
npp)
data_type_name, dummy = data_type_bm.CppMemberString(scope, data_type)
nullable_text = _from_npvariant_template.substitute(
Type=data_type_name,
variable=variable,
text=text,
input=input_expr,
value=value)
return (nullable_text, variable)
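# Illustrative expansion of the snippet returned above (added comment; 'Foo*'
# and 'arg0' are hypothetical names, the inner glue comes from the element
# type's binding model):
#   Foo* arg0;
#   if (NPVARIANT_IS_NULL(args[0])) {
#     arg0 = NULL;
#   } else {
#     ... glue produced by the element type's binding model ...
#     arg0 = <its value expression>;
#   }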
_to_npvariant_pre_template = string.Template("""
${pre_text}
if (!${variable}) {
success = true;
}
""")
_to_npvariant_post_template = string.Template("""
if (${variable}) {
${post_text}
} else {
NULL_TO_NPVARIANT(*${output});
}
""")
def NpapiExprToNPVariant(scope, type_defn, variable, expression, output,
success, npp):
"""Gets the string to store a value into a NPVariant.
This function creates a string containing a C++ code snippet that is used to
store a value into a NPVariant. That operation takes two phases, one that
allocates necessary NPAPI resources, and that can fail, and one that actually
sets the NPVariant (that can't fail). If an error occurs, the snippet will
set the success status variable to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
expression: a string representing the expression that yields the value to
be stored.
output: an expression representing a pointer to the NPVariant to store the
value into.
success: the name of a bool variable containing the current success status.
npp: a string, representing the name of the variable that holds the pointer
to the NPP instance.
Returns:
a (string, string) pair, the first string being the code snippet for the
first phase, and the second one being the code snippet for the second phase.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
pre_text, post_text = data_type_bm.NpapiExprToNPVariant(
scope,
data_type,
variable,
expression,
output,
success,
npp)
nullable_pre_text = _to_npvariant_pre_template.substitute(
variable=variable,
npp=npp,
pre_text=pre_text,
success=success)
nullable_post_text = _to_npvariant_post_template.substitute(
variable=variable,
output=output,
npp=npp,
post_text=post_text,
success=success)
return nullable_pre_text, nullable_post_text
def PpapiBindingGlueHeader(scope, type_defn):
"""Gets the PPAPI glue header for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue header.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def PpapiBindingGlueCpp(scope, type_defn):
"""Gets the PPAPI glue implementation for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue implementation.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
def PpapiDispatchFunctionHeader(scope, type_defn, variable, npp, success):
"""Gets a header for PPAPI glue dispatch functions.
This function creates a string containing a C++ code snippet that should be
included at the beginning of PPAPI glue dispatch functions like Call or
GetProperty. This code snippet will declare and initialize certain variables
that will be used in the dispatch functions, like the pp::Var representing
the object, or a pointer to the pp::Instance.
First it checks whether the pp::Var is null. If so it simply sets the value
to null. It relies on the later compilation of the glue to detect when it is
used with a binding model that cannot be used with the value null. It is
binding model independent.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
variable: a string, representing a name of a variable that can be used to
store a reference to the object.
npp: a string, representing the name of the variable that holds the pointer
to the pp::Instance. Will be declared by the code snippet.
success: the name of a bool variable containing the current success status.
(is not declared by the code snippet).
Returns:
a (string, string) pair, the first string being the code snippet, and the
second string being an expression to access the object.
Raises:
InvalidNullableUsage: always. This function should not be called on a
nullable.
"""
raise InvalidNullableUsage
_ppapi_from_ppvar_template = string.Template("""
${Type} ${variable};
if (${input}.is_null()) {
${variable} = NULL;
} else {
${text}
${variable} = ${value};
}
""")
def PpapiFromPPVar(scope, type_defn, input_expr, variable, success,
exception_context, npp):
"""Gets the string to get a value from a pp::Var.
This function creates a string containing a C++ code snippet that is used to
retrieve a value from a pp::Var. If an error occurs, like if the pp::Var
is not of the correct type, the snippet will set the success status variable
to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
input_expr: an expression representing the pp::Var to get the value from.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
success: the name of a bool variable containing the current success status.
exception_context: the name of a string containing context information, for
use in exception reporting.
npp: a string, representing the name of the variable that holds the pointer
to the pp::Instance.
Returns:
a (string, string) pair, the first string being the code snippet and the
second one being the expression to access that value.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
text, value = data_type_bm.PpapiFromPPVar(
scope,
data_type,
input_expr,
variable + '_nullable',
success,
exception_context,
      npp)
data_type_name, dummy = data_type_bm.CppMemberString(scope, data_type)
nullable_text = _ppapi_from_ppvar_template.substitute(
Type=data_type_name,
variable=variable,
text=text,
input=input_expr,
value=value)
return (nullable_text, variable)
_ppapi_to_ppvar_pre_template = string.Template("""
${pre_text}
if (!${variable}) {
${success} = true;
*exception = pp::Var();
}
""")
_ppapi_to_ppvar_post_template = string.Template("""
if (${variable}) {
${post_text}
} else {
*${output} = pp::Var::Null();
}
""")
def PpapiExprToPPVar(scope, type_defn, variable, expression, output,
success, npp):
"""Gets the string to store a value into a pp::Var.
This function creates a string containing a C++ code snippet that is used to
store a value into a pp::Var. That operation takes two phases, one that
allocates necessary PPAPI resources, and that can fail, and one that actually
sets the pp::Var (that can't fail). If an error occurs, the snippet will
set the success status variable to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
expression: a string representing the expression that yields the value to
be stored.
output: an expression representing a pointer to the pp::Var to store the
value into.
success: the name of a bool variable containing the current success status.
npp: a string, representing the name of the variable that holds the pointer
to the pp::Instance.
Returns:
a (string, string) pair, the first string being the code snippet for the
first phase, and the second one being the code snippet for the second phase.
"""
data_type = type_defn.GetFinalType().data_type
data_type_bm = data_type.binding_model
pre_text, post_text = data_type_bm.PpapiExprToPPVar(
scope,
data_type,
variable,
expression,
output,
success,
npp)
nullable_pre_text = _ppapi_to_ppvar_pre_template.substitute(
variable=variable,
npp=npp,
pre_text=pre_text,
success=success)
nullable_post_text = _ppapi_to_ppvar_post_template.substitute(
variable=variable,
output=output,
npp=npp,
post_text=post_text,
success=success)
return nullable_pre_text, nullable_post_text
def main():
pass
if __name__ == '__main__':
main()
|
apache-2.0
|
samdowd/drumm-farm
|
drumm_env/lib/python2.7/site-packages/boto/cloudtrail/exceptions.py
|
127
|
2246
|
"""
Exceptions that are specific to the cloudtrail module.
"""
from boto.exception import BotoServerError
class InvalidSnsTopicNameException(BotoServerError):
"""
Raised when an invalid SNS topic name is passed to Cloudtrail.
"""
pass
class InvalidS3BucketNameException(BotoServerError):
"""
Raised when an invalid S3 bucket name is passed to Cloudtrail.
"""
pass
class TrailAlreadyExistsException(BotoServerError):
"""
Raised when the given trail name already exists.
"""
pass
class InsufficientSnsTopicPolicyException(BotoServerError):
"""
Raised when the SNS topic does not allow Cloudtrail to post
messages.
"""
pass
class InvalidTrailNameException(BotoServerError):
"""
Raised when the trail name is invalid.
"""
pass
class InternalErrorException(BotoServerError):
"""
Raised when there was an internal Cloudtrail error.
"""
pass
class TrailNotFoundException(BotoServerError):
"""
Raised when the given trail name is not found.
"""
pass
class S3BucketDoesNotExistException(BotoServerError):
"""
Raised when the given S3 bucket does not exist.
"""
pass
class TrailNotProvidedException(BotoServerError):
"""
Raised when no trail name was provided.
"""
pass
class InvalidS3PrefixException(BotoServerError):
"""
Raised when an invalid key prefix is given.
"""
pass
class MaximumNumberOfTrailsExceededException(BotoServerError):
"""
Raised when no more trails can be created.
"""
pass
class InsufficientS3BucketPolicyException(BotoServerError):
"""
Raised when the S3 bucket does not allow Cloudtrail to
write files into the prefix.
"""
pass
class InvalidMaxResultsException(BotoServerError):
pass
class InvalidTimeRangeException(BotoServerError):
pass
class InvalidLookupAttributesException(BotoServerError):
pass
class InvalidCloudWatchLogsLogGroupArnException(BotoServerError):
pass
class InvalidCloudWatchLogsRoleArnException(BotoServerError):
pass
class CloudWatchLogsDeliveryUnavailableException(BotoServerError):
pass
class InvalidNextTokenException(BotoServerError):
pass
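# Illustrative sketch (not part of boto itself): calling code would typically
# catch these per-error subclasses around a CloudTrail API call. The connection
# class, method and attribute below are assumed for illustration only:
#
#     from boto.cloudtrail.layer1 import CloudTrailConnection
#     conn = CloudTrailConnection()
#     try:
#         conn.create_trail('my-trail', 'Invalid Bucket Name!')
#     except InvalidS3BucketNameException as err:
#         print(err.error_message)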
|
mit
|
adishjain/youtube-dl
|
youtube_dl/extractor/common.py
|
6
|
55808
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_getpass,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
url_basename,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", or "m3u8_native".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if display_name is None:
display_name = name
return self._html_search_regex(
self._meta_regex(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower(), None)
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
@staticmethod
def _hidden_inputs(html):
hidden_inputs = {}
for input in re.findall(r'<input([^>]+)>', html):
if not re.search(r'type=(["\'])hidden\1', input):
continue
name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
preference = f.get('preference')
if preference is None:
proto = f.get('protocol')
if proto is None:
proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
preference = 0 if proto in ['http', 'https'] else -0.1
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip()):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source)
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id))
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
'url': manifest_url,
'ext': 'flv',
'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
'preference': preference,
})
self._sort_formats(formats)
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True):
formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if m3u8_doc is False:
return m3u8_doc
last_info = None
last_media = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#EXT-X-MEDIA:'):
last_media = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_media[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
# TODO: looks like video codec is not always necessarily goes first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
return {
'id': video_id,
'title': title or video_id,
'description': description,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
videos = smil.findall(self._xpath_ns('.//video', namespace))
for video in videos:
src = video.get('src')
if not src:
continue
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
filesize = int_or_none(video.get('size') or video.get('fileSize'))
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
ext = video.get('ext')
src_ext = determine_ext(src)
streamer = video.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
if proto == 'm3u8' or src_ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls'))
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse.urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds'))
continue
if src_url.startswith('http'):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
self._sort_formats(formats)
return formats
def _parse_smil_subtitles(self, smil, namespace=None):
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src:
continue
ext = textstream.get('ext') or determine_ext(src)
if not ext:
type_ = textstream.get('type')
if type_ == 'text/srt':
ext = 'srt'
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName')
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = compat_urllib_request.Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if 'playlist' in tc:
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
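# Illustrative sketch (not shipped with youtube-dl): a minimal search extractor
# built on SearchInfoExtractor, understanding queries like
# "examplesearch5:kittens". The search key and result URLs are hypothetical.
class _ExampleSearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'examplesearch'
    _MAX_RESULTS = 50
    def _get_n_results(self, query, n):
        entries = [
            self.url_result('http://example.com/videos/%d' % i)
            for i in range(n)]
        return self.playlist_result(entries, playlist_title=query)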
|
unlicense
|
TommiHelander/quabro
|
pgu/gui/__init__.py
|
3
|
1226
|
"""Modules for creating a widget-based user interface. See the examples folder
for sample scripts that use this module."""
import pygame
# The basestring class was removed in Python 3, but we want to keep it to maintain
# compatibility with previous versions of python.
try:
__builtins__["basestring"]
except KeyError:
__builtins__["basestring"] = str
from .theme import Theme
from .style import Style
from .widget import Widget
from .surface import subsurface, ProxySurface
from .const import *
from .container import Container
from .app import App, Desktop
from .table import Table
from .document import Document
#html
from .area import SlideBox, ScrollArea, List
from .form import Form
from .group import Group
from .basic import Spacer, Color, Label, Image, parse_color
from .button import Icon, Button, Switch, Checkbox, Radio, Tool, Link
from .input import Input, Password
from .keysym import Keysym
from .slider import VSlider, HSlider, VScrollBar, HScrollBar
from .select import Select
from .misc import ProgressBar
from .menus import Menus
from .dialog import Dialog, FileDialog
from .textarea import TextArea
from .deprecated import Toolbox, action_open, action_setvalue, action_quit, action_exec
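# Illustrative sketch (not part of pgu itself): a minimal application built from
# the widgets re-exported above. The calls follow the scripts in the examples
# folder, but the exact arguments should be checked against those examples:
#
#     from pgu import gui
#     app = gui.Desktop()
#     app.connect(gui.QUIT, app.quit, None)
#     btn = gui.Button("Quit")
#     btn.connect(gui.CLICK, app.quit, None)
#     app.run(btn)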
|
gpl-3.0
|
mlperf/training_results_v0.6
|
Google/benchmarks/transformer/implementations/tpu-v3-2048-transformer/dataset_preproc/data_generators/subject_verb_agreement.py
|
7
|
8527
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for subject-verb agreement dataset.
https://arxiv.org/pdf/1611.01368.pdf
Based on the main paper, predicting the verb's number can be done in two setups:
- Language Modeling
- Binary Classification
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import gzip
import os
import random
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
_FILE_NAME = 'agr_50_mostcommon_10K'
_TAR = _FILE_NAME + '.tsv.gz'
_URL = 'http://tallinzen.net/media/rnn_agreement/' + _TAR
_LABEL_DICT = {'VBZ': 0, 'VBP': 1}
def _build_vocab(examples, example_field, vocab_dir, vocab_name):
"""Build a vocabulary from examples.
Args:
examples: a dict containing all the examples.
example_field: field of example from which the vocabulary is built.
vocab_dir: directory where to save the vocabulary.
vocab_name: vocab file name.
Returns:
text encoder.
"""
vocab_path = os.path.join(vocab_dir, vocab_name)
if not tf.gfile.Exists(vocab_path):
data = []
for e in examples:
data.extend(e[example_field].split())
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)
encoder.store_to_file(vocab_path)
else:
encoder = text_encoder.TokenTextEncoder(vocab_path)
return encoder
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
"""Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
    All examples in the dataset plus the train, validation, and test splits.
"""
infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tf.logging.info('Loading examples')
all_examples = []
for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')):
if i % 100000 == 0:
tf.logging.info('%d examples have been loaded....' % i)
ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}
all_examples.append(ex)
random.seed(1)
random.shuffle(all_examples)
n_train = int(len(all_examples) * prop_train)
n_val = n_train + int(len(all_examples) * prop_val)
train = all_examples[:n_train]
val = all_examples[n_train:n_val]
test = []
for e in all_examples[n_val:]:
if e['n_intervening'] == e['n_diff_intervening']:
test.append(e)
return all_examples, train, val, test
@registry.register_problem
class SvaNumberPrediction(text_problems.Text2ClassProblem):
"""Subject verb agreement as verb number predicion (binary classification)."""
@property
def is_generate_per_split(self):
# generate_data will shard the data into TRAIN and EVAL for us.
return True
@property
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each.
This is the setup of the main paper. 10% train/ 90% eval
Returns:
A dict containing splits information.
"""
return [{
'split': problem.DatasetSplit.TRAIN,
'shards': 1,
}, {
'split': problem.DatasetSplit.EVAL,
'shards': 1,
}, {
'split': problem.DatasetSplit.TEST,
'shards': 10,
}]
@property
def train_proportion(self):
# generate_data will shard the data into TRAIN and EVAL for us.
return 0.09
@property
def validation_proportion(self):
# generate_data will shard the data into TRAIN and EVAL for us.
return 0.01
@property
def vocab_type(self):
return text_problems.VocabType.TOKEN
@property
def num_classes(self):
return 2
def class_labels(self, data_dir):
"""Class labels."""
del data_dir
return ['VBZ', 'VBP']
def generate_samples(self, data_dir, tmp_dir, dataset_split):
"""Generate samples of text and label pairs.
Each yielded dict will be a single example. The inputs should be raw text.
The label should be an int in [0, self.num_classes).
Args:
data_dir: final data directory. Typically only used in this method to copy
over user-supplied vocab files (for example, if vocab_type ==
VocabType.TOKEN).
tmp_dir: temporary directory that you can use for downloading and scratch.
dataset_split: problem.DatasetSplit, which data split to generate samples
for (for example, training and evaluation).
Returns:
sample generator.
"""
    example_field = 'sentence'
examples_for_vocab, train, val, test = load_examples(
tmp_dir, self.train_proportion, self.validation_proportion)
_build_vocab(
        examples_for_vocab, example_field, data_dir, self.vocab_filename)
if dataset_split == problem.DatasetSplit.TRAIN:
examples = train
elif dataset_split == problem.DatasetSplit.EVAL:
examples = val
elif dataset_split == problem.DatasetSplit.TEST:
examples = test
def _generate_samples():
for example in examples:
index = int(example['verb_index']) - 1
        inputs = example[example_field].split()[:index]
yield {
'inputs': ' '.join(inputs),
'label': _LABEL_DICT[example['verb_pos']]
}
return _generate_samples()
def eval_metrics(self):
"""Specify the set of evaluation metrics for this problem.
Returns:
List of evaluation metrics of interest.
"""
# TODO(dehghani): Implement accuracy of the target word as a t2t metric.
return [metrics.Metrics.ACC]
@registry.register_problem
class SvaLanguageModeling(text_problems.Text2SelfProblem):
"""Subject verb agreement as language modeling task."""
@property
def is_generate_per_split(self):
# generate_data will shard the data into TRAIN and EVAL for us.
return True
@property
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each.
This is the setup of the main paper. 10% train/ 90% eval
Returns:
A dict containing splits information.
"""
return [{
'split': problem.DatasetSplit.TRAIN,
'shards': 1,
}, {
'split': problem.DatasetSplit.EVAL,
'shards': 1,
}, {
'split': problem.DatasetSplit.TEST,
'shards': 10,
}]
@property
def train_proportion(self):
# Fraction of the data used for training.
return 0.09
@property
def validation_proportion(self):
# Fraction of the data used for validation.
return 0.01
@property
def vocab_type(self):
return text_problems.VocabType.TOKEN
def generate_samples(self, data_dir, tmp_dir, dataset_split):
"""Generates samples.
Args:
data_dir: data directory
tmp_dir: temp directory
dataset_split: dataset split
Returns:
sample generator.
"""
example_field = 'sentence'
examples_for_vocab, train, val, test = load_examples(
tmp_dir, self.train_proportion, self.validation_proportion)
_build_vocab(
examples_for_vocab, example_field, data_dir, self.vocab_filename)
if dataset_split == problem.DatasetSplit.TRAIN:
examples = train
elif dataset_split == problem.DatasetSplit.EVAL:
examples = val
elif dataset_split == problem.DatasetSplit.TEST:
examples = test
def _generate_samples():
for example in examples:
index = int(example['verb_index']) - 1
targets = example[example_field].split()[:index + 1]
yield {'targets': ' '.join(targets)}
return _generate_samples()
|
apache-2.0
|
shssoichiro/servo
|
tests/wpt/web-platform-tests/tools/pytest/_pytest/junitxml.py
|
168
|
12879
|
"""
report test results in JUnit-XML format,
for use with Jenkins and build integration servers.
Based on initial code from Ross Lawley.
"""
# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
import py
import os
import re
import sys
import time
import pytest
# Python 2.X and 3.X compatibility
if sys.version_info[0] < 3:
from codecs import open
else:
unichr = chr
unicode = str
long = int
class Junit(py.xml.Namespace):
pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0d)
_legal_ranges = (
(0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
)
_legal_xml_re = [
unicode("%s-%s") % (unichr(low), unichr(high))
for (low, high) in _legal_ranges if low < sys.maxunicode
]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
_py_ext_re = re.compile(r"\.py$")
def bin_xml_escape(arg):
def repl(matchobj):
i = ord(matchobj.group())
if i <= 0xFF:
return unicode('#x%02X') % i
else:
return unicode('#x%04X') % i
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
class _NodeReporter(object):
def __init__(self, nodeid, xml):
self.id = nodeid
self.xml = xml
self.add_stats = self.xml.add_stats
self.duration = 0
self.properties = []
self.nodes = []
self.testcase = None
self.attrs = {}
def append(self, node):
self.xml.add_stats(type(node).__name__)
self.nodes.append(node)
def add_property(self, name, value):
self.properties.append((str(name), bin_xml_escape(value)))
def make_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.properties:
return Junit.properties([
Junit.property(name=name, value=value)
for name, value in self.properties
])
return ''
def record_testreport(self, testreport):
assert not self.testcase
names = mangle_test_address(testreport.nodeid)
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
attrs = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
}
if testreport.location[1] is not None:
attrs["line"] = testreport.location[1]
self.attrs = attrs
def to_xml(self):
testcase = Junit.testcase(time=self.duration, **self.attrs)
testcase.append(self.make_properties_node())
for node in self.nodes:
testcase.append(node)
return testcase
def _add_simple(self, kind, message, data=None):
data = bin_xml_escape(data)
node = kind(data, message=message)
self.append(node)
def _write_captured_output(self, report):
for capname in ('out', 'err'):
allcontent = ""
for name, content in report.get_sections("Captured std%s" %
capname):
allcontent += content
if allcontent:
tag = getattr(Junit, 'system-' + capname)
self.append(tag(bin_xml_escape(allcontent)))
def append_pass(self, report):
self.add_stats('passed')
self._write_captured_output(report)
def append_failure(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self._add_simple(
Junit.skipped,
"xfail-marked test passes unexpectedly")
else:
if hasattr(report.longrepr, "reprcrash"):
message = report.longrepr.reprcrash.message
elif isinstance(report.longrepr, (unicode, str)):
message = report.longrepr
else:
message = str(report.longrepr)
message = bin_xml_escape(message)
fail = Junit.failure(message=message)
fail.append(bin_xml_escape(report.longrepr))
self.append(fail)
self._write_captured_output(report)
def append_collect_error(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
self.append(Junit.error(bin_xml_escape(report.longrepr),
message="collection failure"))
def append_collect_skipped(self, report):
self._add_simple(
Junit.skipped, "collection skipped", report.longrepr)
def append_error(self, report):
self._add_simple(
Junit.error, "test setup failure", report.longrepr)
self._write_captured_output(report)
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
self._add_simple(
Junit.skipped, "expected test failure", report.wasxfail
)
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = bin_xml_escape(skipreason[9:])
self.append(
Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
type="pytest.skip",
message=skipreason))
self._write_captured_output(report)
def finalize(self):
data = self.to_xml().unicode(indent=0)
self.__dict__.clear()
self.to_xml = lambda: py.xml.raw(data)
@pytest.fixture
def record_xml_property(request):
"""Fixture that adds extra xml properties to the tag for the calling test.
The fixture is callable with (name, value), with value being automatically
xml-encoded.
"""
request.node.warn(
code='C3',
message='record_xml_property is an experimental feature',
)
xml = getattr(request.config, "_xml", None)
if xml is not None:
node_reporter = xml.node_reporter(request.node.nodeid)
return node_reporter.add_property
else:
def add_property_noop(name, value):
pass
return add_property_noop
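# Usage sketch (added illustration, not part of this module): inside a test,
# the fixture is called with a name/value pair, e.g.
#
#   def test_widget(record_xml_property):
#       record_xml_property("example_key", 1)
#       assert True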
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group.addoption(
'--junitxml', '--junit-xml',
action="store",
dest="xmlpath",
metavar="path",
default=None,
help="create junit-xml style report file at given path.")
group.addoption(
'--junitprefix', '--junit-prefix',
action="store",
metavar="str",
default=None,
help="prepend prefix to classnames in junit-xml output")
def pytest_configure(config):
xmlpath = config.option.xmlpath
# prevent opening xmllog on slave nodes (xdist)
if xmlpath and not hasattr(config, 'slaveinput'):
config._xml = LogXML(xmlpath, config.option.junitprefix)
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, '_xml', None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
def mangle_test_address(address):
path, possible_open_bracket, params = address.partition('[')
names = path.split("::")
try:
names.remove('()')
except ValueError:
pass
# convert file path to dotted path
names[0] = names[0].replace("/", '.')
names[0] = _py_ext_re.sub("", names[0])
# put any params back
names[-1] += possible_open_bracket + params
return names
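# Worked example (added for clarity):
#   mangle_test_address('tests/test_foo.py::TestBar::()::test_baz[1]')
# returns ['tests.test_foo', 'TestBar', 'test_baz[1]'].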
class LogXML(object):
def __init__(self, logfile, prefix):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.stats = dict.fromkeys([
'error',
'passed',
'failure',
'skipped',
], 0)
self.node_reporters = {} # nodeid -> _NodeReporter
self.node_reporters_ordered = []
def finalize(self, report):
nodeid = getattr(report, 'nodeid', report)
# local hack to handle xdist report order
slavenode = getattr(report, 'node', None)
reporter = self.node_reporters.pop((nodeid, slavenode))
if reporter is not None:
reporter.finalize()
def node_reporter(self, report):
nodeid = getattr(report, 'nodeid', report)
# local hack to handle xdist report order
slavenode = getattr(report, 'node', None)
key = nodeid, slavenode
if key in self.node_reporters:
# TODO: breaks for --dist=each
return self.node_reporters[key]
reporter = _NodeReporter(nodeid, self)
self.node_reporters[key] = reporter
self.node_reporters_ordered.append(reporter)
return reporter
def add_stats(self, key):
if key in self.stats:
self.stats[key] += 1
def _opentestcase(self, report):
reporter = self.node_reporter(report)
reporter.record_testreport(report)
return reporter
def pytest_runtest_logreport(self, report):
"""handle a setup/call/teardown report, generating the appropriate
xml tags as necessary.
note: due to plugins like xdist, this hook may be called in interlaced
order with reports from other nodes. for example:
usual call order:
-> setup node1
-> call node1
-> teardown node1
-> setup node2
-> call node2
-> teardown node2
possible call order in xdist:
-> setup node1
-> call node1
-> setup node2
-> call node2
-> teardown node2
-> teardown node1
"""
if report.passed:
if report.when == "call": # ignore setup/teardown
reporter = self._opentestcase(report)
reporter.append_pass(report)
elif report.failed:
reporter = self._opentestcase(report)
if report.when == "call":
reporter.append_failure(report)
else:
reporter.append_error(report)
elif report.skipped:
reporter = self._opentestcase(report)
reporter.append_skipped(report)
self.update_testcase_duration(report)
if report.when == "teardown":
self.finalize(report)
def update_testcase_duration(self, report):
"""accumulates total duration for nodeid from given report and updates
the Junit.testcase with the new total if already created.
"""
reporter = self.node_reporter(report)
reporter.duration += getattr(report, 'duration', 0.0)
def pytest_collectreport(self, report):
if not report.passed:
reporter = self._opentestcase(report)
if report.failed:
reporter.append_collect_error(report)
else:
reporter.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
reporter = self.node_reporter('internal')
reporter.attrs.update(classname="pytest", name='internal')
reporter._add_simple(Junit.error, 'internal error', excrepr)
def pytest_sessionstart(self):
self.suite_start_time = time.time()
def pytest_sessionfinish(self):
dirname = os.path.dirname(os.path.abspath(self.logfile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(self.logfile, 'w', encoding='utf-8')
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = self.stats['passed'] + self.stats['failure']
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(Junit.testsuite(
[x.to_xml() for x in self.node_reporters_ordered],
name="pytest",
errors=self.stats['error'],
failures=self.stats['failure'],
skips=self.stats['skipped'],
tests=numtests,
time="%.3f" % suite_time_delta, ).unicode(indent=0))
logfile.close()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-",
"generated xml file: %s" % (self.logfile))
|
mpl-2.0
|
josephlewis42/autopilot
|
extern/gtest/test/gtest_break_on_failure_unittest.py
|
2140
|
7339
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
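# For example (illustrative): Run([EXE_PATH, '--gtest_break_on_failure'])
# returns 1 when the test program is killed by a signal (e.g. SIGSEGV) and 0
# when it exits normally.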
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
gpl-3.0
|
nburn42/tensorflow
|
tensorflow/python/ops/linalg/linear_operator_kronecker.py
|
23
|
22568
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the Kronecker product of one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorKronecker",
]
def _vec(x):
"""Stacks column of matrix to form a single column."""
return array_ops.reshape(
array_ops.matrix_transpose(x),
array_ops.concat(
[array_ops.shape(x)[:-2], [-1]], axis=0))
def _unvec_by(y, num_col):
"""Unstack vector to form a matrix, with a specified amount of columns."""
return array_ops.matrix_transpose(
array_ops.reshape(
y,
array_ops.concat(
[array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))
def _rotate_last_dim(x, rotate_right=False):
"""Rotate the last dimension either left or right."""
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
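# Added illustration of the helpers above (not part of the original module):
# for x = [[1., 2., 3.],
#          [4., 5., 6.]]
# _vec(x) stacks the columns into [1., 4., 2., 5., 3., 6.], and
# _unvec_by(_vec(x), num_col=3) recovers the original 2 x 3 matrix.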
@tf_export("linalg.LinearOperatorKronecker")
class LinearOperatorKronecker(linear_operator.LinearOperator):
"""Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
`op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is
associative).
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
==> [[1., 0., 2., 0.],
     [2., 1., 4., 2.],
     [3., 0., 4., 0.],
     [6., 3., 8., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
# Create a shape [2, 3, 30, 2] input.
x = tf.random_normal(shape=[2, 3, 30, 2])
operator_large.matmul(x)
==> Shape [2, 3, 20, 2] Tensor
```
#### Performance
The cost of any operation on `LinearOperatorKronecker` is roughly the sum of
the cost of the same operation on each of the factor operators.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
# A Kronecker product is invertible, if and only if all factors are
# invertible.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
# The eigenvalues of a Kronecker product are equal to the products of eigen
# values of the corresponding factors.
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = operators[0].name
for operator in operators[1:]:
name += "_x_" + operator.name
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension
matrix_shape = tensor_shape.TensorShape([
range_dimension, domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
domain_dimension = self.operators[0].domain_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension_tensor()
matrix_shape = [range_dimension, domain_dimension]
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape_tensor()
for operator in self.operators[1:]:
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape, operator.batch_shape_tensor())
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Here we heavily rely on Roth's column Lemma [1]:
# (A x B) * vec X = vec BXA^T,
# where vec stacks all the columns of the matrix under each other. In our
# case, x represents a batch of vec X (i.e. we think of x as a batch of
# column vectors, rather than a matrix). Each member of the batch can be
# reshaped to a matrix (hence we get a batch of matrices).
# We can iteratively apply this lemma by noting that if B is a Kronecker
# product, then we can apply the lemma again.
# [1] W. E. Roth, "On direct product matrices,"
# Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,
# 1934
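# Added dimension check (illustration): if A is m x n and B is p x q, then
# A x B is (m*p) x (n*q), vec X must have length n*q (so X is q x n), and
# vec BXA^T has length m*p, matching the lemma above.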
# Efficiency
# Naively doing the Kronecker product, by calculating the dense matrix and
# applying it can take cubic time in the size of domain_dimension
# (assuming a square matrix). The other issue is that calculating the dense
# matrix can be prohibitively expensive, in that it can take a large amount
# of memory.
#
# This implementation avoids this memory blow up by only computing matmuls
# with the factors. In this way, we don't have to realize the dense matrix.
# In terms of complexity, if we have Kronecker Factors of size:
# (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \prod n_i, and we
# have as input a [N, M] matrix, the naive approach would take O(N^2 M).
# With this approach (ignoring reshaping of tensors and transposes for now),
# the time complexity can be O(M * (\sum n_i) * N). There is also the
# benefit of batched multiplication (In this example, the batch size is
# roughly M * N) so this can be much faster. However, not factored in are
# the costs of the several tensor transposes, which can affect cache
# behavior.
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
x = linalg.adjoint(x)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
# x has shape [B, R, C], where B represents some number of batch dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(x, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^T) = (AX^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].matvec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if x.shape.is_fully_defined():
column_dim = x.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
x.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _determinant(self):
# Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m
# matrix, and X2 is an n x n matrix. We can iteratively apply this property
# to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the
# domain dimension of all operators, then we have:
# |X1 x X2 x X3 ...| =
# |X1| ** (T / m) * |X2 x X3 ... | ** m =
# |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =
# |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n)
# And by doing induction we have product(|X_i| ** (T / dim(X_i))).
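# Added numeric illustration: for X1 of shape [2, 2] and X2 of shape [3, 3],
# T = 6 and |X1 x X2| = |X1| ** (6 / 2) * |X2| ** (6 / 3)
# = |X1| ** 3 * |X2| ** 2.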
total = self.domain_dimension_tensor()
determinant = 1.
for operator in self.operators:
determinant *= operator.determinant() ** math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return determinant
def _log_abs_determinant(self):
# This will be sum((total / dim(x_i)) * log |X_i|)
total = self.domain_dimension_tensor()
log_abs_det = 0.
for operator in self.operators:
log_abs_det += operator.log_abs_determinant() * math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return log_abs_det
def _trace(self):
# tr(A x B) = tr(A) * tr(B)
trace = 1.
for operator in self.operators:
trace *= operator.trace()
return trace
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# Here we follow the same use of Roth's column lemma as in `matmul`, with
# the key difference that we replace all `matmul` instances with `solve`.
# This follows from the property that inv(A x B) = inv(A) x inv(B).
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
rhs = linalg.adjoint(rhs)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)
# rhs has shape [B, R, C], where B represents some number of batch
# dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(rhs, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing X (A^-1)^T = (A^-1 X^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].solvevec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if rhs.shape.is_fully_defined():
column_dim = rhs.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
rhs.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _diag_part(self):
diag_part = self.operators[0].diag_part()
for operator in self.operators[1:]:
diag_part = diag_part[..., :, array_ops.newaxis]
op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]
diag_part *= op_diag_part
diag_part = array_ops.reshape(
diag_part,
shape=array_ops.concat(
[array_ops.shape(diag_part)[:-2], [-1]], axis=0))
if self.range_dimension > self.domain_dimension:
diag_dimension = self.domain_dimension
else:
diag_dimension = self.range_dimension
diag_part.set_shape(
self.batch_shape.concatenate(diag_dimension))
return diag_part
def _to_dense(self):
product = self.operators[0].to_dense()
for operator in self.operators[1:]:
# Product has shape [B, R1, 1, C1].
product = product[
..., :, array_ops.newaxis, :, array_ops.newaxis]
# Operator has shape [B, 1, R2, 1, C2].
op_to_mul = operator.to_dense()[
..., array_ops.newaxis, :, array_ops.newaxis, :]
# This is now [B, R1, R2, C1, C2].
product *= op_to_mul
# Now merge together dimensions to get [B, R1 * R2, C1 * C2].
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
product.set_shape(self.shape)
return product
def _assert_non_singular(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_non_singular() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be invertible.")
def _assert_self_adjoint(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_self_adjoint() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be self adjoint.")
|
apache-2.0
|
mbkumar/pymatgen
|
pymatgen/symmetry/structure.py
|
3
|
5251
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements symmetry-related structure forms.
"""
from typing import Sequence, List
import numpy as np
from tabulate import tabulate
from pymatgen.core.structure import Structure, PeriodicSite
class SymmetrizedStructure(Structure):
"""
This class represents a symmetrized structure, i.e. a structure
where the spacegroup and symmetry operations are defined. This class is
typically not instantiated directly but is instead obtained by calling
pymatgen.symmetry.analyzer.SpacegroupAnalyzer.get_symmetrized_structure.
.. attribute: equivalent_indices
indices of structure grouped by equivalency
"""
def __init__(self, structure: Structure,
spacegroup,
equivalent_positions: Sequence[int],
wyckoff_letters: Sequence[str]):
"""
Args:
structure (Structure): Original structure
spacegroup (SpacegroupOperations): An input SpacegroupOperations from
SpacegroupAnalyzer.
equivalent_positions: Equivalent positions from SpacegroupAnalyzer.
wyckoff_letters: Wyckoff letters
"""
self.spacegroup = spacegroup
u, inv = np.unique(equivalent_positions, return_inverse=True)
self.site_labels = equivalent_positions
super().__init__(
structure.lattice, [site.species for site in structure],
structure.frac_coords, site_properties=structure.site_properties)
equivalent_indices = [[] for _ in range(len(u))] # type: ignore
equivalent_sites = [[] for _ in range(len(u))] # type: ignore
wyckoff_symbols = [[] for _ in range(len(u))] # type: ignore
for i, inv in enumerate(inv):
equivalent_indices[inv].append(i)
equivalent_sites[inv].append(self.sites[i])
wyckoff_symbols[inv].append(wyckoff_letters[i])
self.equivalent_indices: List[int] = equivalent_indices # type: ignore
self.equivalent_sites: List[PeriodicSite] = equivalent_sites # type: ignore
self.wyckoff_letters = wyckoff_letters
self.wyckoff_symbols = ["%d%s" % (len(w), w[0])
for w in wyckoff_symbols]
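# Added note (illustration): an orbit of four equivalent sites with Wyckoff
# letter 'a' is reported as the symbol "4a".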
def copy(self):
"""
:return: Copy of structure.
"""
return self.__class__(self, spacegroup=self.spacegroup,
equivalent_positions=self.site_labels,
wyckoff_letters=self.wyckoff_letters)
def find_equivalent_sites(self, site) -> List[PeriodicSite]:
"""
Finds all symmetrically equivalent sites for a particular site
Args:
site (PeriodicSite): A site in the structure
Returns:
([PeriodicSite]): List of all symmetrically equivalent sites.
"""
for sites in self.equivalent_sites:
if site in sites:
return sites
raise ValueError("Site not in structure")
def __repr__(self):
return self.__str__()
def __str__(self):
outs = [
"SymmetrizedStructure",
"Full Formula ({s})".format(s=self.composition.formula),
"Reduced Formula: {}".format(self.composition.reduced_formula)]
def to_s(x):
return "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
if self._charge:
if self._charge >= 0:
outs.append("Overall Charge: +{}".format(self._charge))
else:
outs.append("Overall Charge: -{}".format(self._charge))
outs.append("Sites ({i})".format(i=len(self)))
data = []
props = self.site_properties
keys = sorted(props.keys())
for i, sites in enumerate(self.equivalent_sites):
site = sites[0]
row = [str(i), site.species_string]
row.extend([to_s(j) for j in site.frac_coords])
row.append(self.wyckoff_symbols[i])
for k in keys:
row.append(props[k][i])
data.append(row)
outs.append(tabulate(data, headers=["#", "SP", "a", "b", "c", "Wyckoff"] + keys,
))
return "\n".join(outs)
def as_dict(self):
"""
:return: MSONAble dict
"""
return {
"structure": super().as_dict(),
"spacegroup": self.spacegroup,
"equivalent_positions": self.site_labels,
"wyckoff_letters": self.wyckoff_letters
}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: SymmetrizedStructure
"""
return SymmetrizedStructure(
Structure.from_dict(d["structure"]),
spacegroup=d["spacegroup"],
equivalent_positions=d["equivalent_positions"],
wyckoff_letters=d["wyckoff_letters"]
)
|
mit
|
dmuhlhei/AliPhysics
|
PWGJE/EMCALJetTasks/Tracks/analysis/base/struct/EventHistogram.py
|
41
|
4257
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from copy import copy, deepcopy
class EventHistogram(object):
def __init__(self, histo):
self._histo = histo
self._vertexrange = {}
def GetROOTHisto(self):
return self._histo
def GetVertexRange(self):
return self._vertexrange
def SetVertexRange(self, vtxmin, vtxmax):
self._vertexrange["min"] = vtxmin
self._vertexrange["max"] = vtxmax
def GetEventCount(self):
print "Method virtual - to be implemented by inheriting classes"
def _Deepcopy(self, other, memo):
underlyinghist = other.GetROOTHisto()
self._histo = deepcopy(underlyinghist, memo)
self._vertexrange = deepcopy(other.GetVertexRange(), memo)
def _Copy(self, other):
underlyinghist = other.GetROOTHisto()
self._histo = copy(underlyinghist)
self._vertexrange = copy(other.GetVertexRange())
def Add(self, otherhisto):
self._histo.Add(otherhisto)
def Scale(self, scalefactor):
self._histo.Scale(scalefactor)
class EventHistogramOld(EventHistogram):
def __init__(self, histo):
EventHistogram.__init__(self, histo)
self.__usePileupRejected = True
def SetUsePileupRejected(self, doUse = True):
self.__usePileupRejected = doUse
def IsUsingPileupRejected(self):
return self.__usePileupRejected
def GetEventCount(self):
if len(self._vertexrange):
binMin = self._histo.GetYaxis().FindBin(self._vertexrange["min"])
binMax = self._histo.GetYaxis().FindBin(self._vertexrange["max"])
eventcounter = self._histo.ProjectionX("eventCounter", binMin, binMax)
else:
eventcounter = self._histo.ProjectionX("eventcounter")
pileupbin = 1
if self.__usePileupRejected:
pileupbin = 2
return eventcounter.GetBinContent(pileupbin)
def __copy__(self, other):
newobj = EventHistogramOld(None)
newobj._Copy(other)
newobj.SetUsePileupRejected(other.IsUsingPileupRejected())
return newobj
def __deepcopy__(self, other, memo):
newobj = EventHistogramOld(None)
newobj._Deepcopy(other, memo)
newobj.SetUsePileupRejected(other.IsUsingPileupRejected())
return newobj
class EventHistogramNew(EventHistogram):
def __init__(self, histo):
EventHistogram.__init__(self, histo)
def SetUsePileupRejected(self, doUse = True):
pass
def GetEventCount(self):
if not len(self._vertexrange):
return self._histo.Integral()
else:
binmin = self._histo.GetXaxis().FindBin(self._vertexrange["min"])
binmax = self._histo.GetXaxis().FindBin(self._vertexrange["max"])
return self._histo.Integral(binmin, binmax)
def __copy__(self, other):
newobj = EventHistogramNew(None)
newobj._Copy(other)
return newobj
def __deepcopy__(self, other, memo):
newobj = EventHistogramNew(None)
newobj._Deepcopy(other, memo)
return newobj
|
bsd-3-clause
|
jdamick/openshift-ansible
|
playbooks/aws/openshift-cluster/library/ec2_ami_find.py
|
27
|
9772
|
#!/usr/bin/python
#pylint: skip-file
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: 2.0
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: Tom Bamford
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
required: false
default: null
ami_id:
description:
- An AMI ID to match.
default: null
required: false
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
default: null
required: false
architecture:
description:
- An architecture type to match (e.g. x86_64).
default: null
required: false
hypervisor:
description:
- A hypervisor type to match (e.g. xen).
default: null
required: false
is_public:
description:
- Whether or not the image(s) are public.
choices: ['yes', 'no']
default: null
required: false
name:
description:
- An AMI name to match.
default: null
required: false
platform:
description:
- Platform type to match.
default: null
required: false
sort:
description:
- Optional attribute with which to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
choices: ['name', 'description', 'tag']
default: null
required: false
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
state:
description:
- AMI state to match.
default: 'available'
required: false
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
default: null
required: false
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
required: false
requirements:
- boto
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
ami_tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
# Launch an EC2 instance
- ec2:
image: "{{ ami_search.results[0].ami_id }}"
instance_type: m4.medium
key_name: mykey
wait: yes
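# Added illustration (hypothetical values): slice the sorted results with
# sort_start/sort_end, keeping the third through fifth results
# (Python slice [2:5]) after sorting by name
- ec2_ami_find:
    owner: self
    sort: name
    sort_start: 2
    sort_end: 5
  register: ami_find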
'''
try:
import boto.ec2
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
import json
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region = dict(required=True,
aliases = ['aws_region', 'ec2_region']),
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False),
name = dict(required=False),
platform = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = is_public
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
'ami_id': image.id,
'architecture': image.architecture,
'description': image.description,
'is_public': image.is_public,
'name': image.name,
'owner_id': image.owner_id,
'platform': image.platform,
'root_device_name': image.root_device_name,
'root_device_type': image.root_device_type,
'state': image.state,
'tags': image.tags,
'virtualization_type': image.virtualization_type,
}
if image.kernel_id:
data['kernel_id'] = image.kernel_id
if image.ramdisk_id:
data['ramdisk_id'] = image.ramdisk_id
results.append(data)
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
try:
if sort and sort_start and sort_end:
results = results[int(sort_start):int(sort_end)]
elif sort and sort_start:
results = results[int(sort_start):]
elif sort and sort_end:
results = results[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
apache-2.0
|
orgito/ansible
|
lib/ansible/utils/module_docs_fragments/emc.py
|
35
|
1140
|
#
# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <[email protected]>
#
# This file is part of Ansible
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
DOCUMENTATION = """
options:
- See respective platform section for more details
requirements:
- See respective platform section for more details
notes:
- Ansible modules are available for EMC VNX.
"""
# Documentation fragment for VNX (emc_vnx)
EMC_VNX = """
options:
sp_address:
description:
- Address of the SP of target/secondary storage.
required: true
sp_user:
description:
- Username for accessing SP.
default: sysadmin
required: false
sp_password:
description:
- Password for accessing SP.
default: sysadmin
required: false
requirements:
- An EMC VNX Storage device.
- Ansible 2.7.
- storops (0.5.10 or greater). Install using 'pip install storops'.
notes:
- The modules prefixed with emc_vnx are built to support the EMC VNX storage platform.
"""
|
gpl-3.0
|
CEG-FYP-OpenStack/scheduler
|
nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
|
12
|
5114
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from nova import objects
from nova.scheduler.filters import compute_capabilities_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestComputeCapabilitiesFilter(test.NoDBTestCase):
def setUp(self):
super(TestComputeCapabilitiesFilter, self).setUp()
self.filt_cls = compute_capabilities_filter.ComputeCapabilitiesFilter()
def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
# In a real OpenStack runtime environment, compute capability
# values may be numbers, so we use numbers in this unit test.
capabilities = {}
capabilities.update(ecaps)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
host_state = {'free_ram_mb': 1024}
host_state.update(capabilities)
host = fakes.FakeHostState('host1', 'node1', host_state)
assertion = self.assertTrue if passes else self.assertFalse
assertion(self.filt_cls.host_passes(host, spec_obj))
def test_compute_filter_passes_without_extra_specs(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=1024))
host_state = {'free_ram_mb': 1024}
host = fakes.FakeHostState('host1', 'node1', host_state)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_compute_filter_fails_without_host_state(self):
especs = {'capabilities': '1'}
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
self.assertFalse(self.filt_cls.host_passes(None, spec_obj))
def test_compute_filter_fails_without_capabilites(self):
cpu_info = """ { } """
cpu_info = six.text_type(cpu_info)
self._do_test_compute_filter_extra_specs(
ecaps={'cpu_info': cpu_info},
especs={'capabilities:cpu_info:vendor': 'Intel'},
passes=False)
def test_compute_filter_pass_cpu_info_as_text_type(self):
cpu_info = """ { "vendor": "Intel", "model": "core2duo",
"arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
{"cores": 1, "threads":1, "sockets": 1}} """
cpu_info = six.text_type(cpu_info)
self._do_test_compute_filter_extra_specs(
ecaps={'cpu_info': cpu_info},
especs={'capabilities:cpu_info:vendor': 'Intel'},
passes=True)
def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
cpu_info = "cpu_info"
cpu_info = six.text_type(cpu_info)
self._do_test_compute_filter_extra_specs(
ecaps={'cpu_info': cpu_info},
especs={'capabilities:cpu_info:vendor': 'Intel'},
passes=False)
def test_compute_filter_passes_extra_specs_simple(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': 1, 'opt2': 2}},
especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_fails_extra_specs_simple(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': 1, 'opt2': 2}},
especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
passes=False)
def test_compute_filter_pass_extra_specs_simple_with_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': 1, 'opt2': 2}},
especs={'capabilities:opt1': '1',
'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_pass_extra_specs_same_as_scope(self):
# Make sure this still works even if the key is the same as the scope
self._do_test_compute_filter_extra_specs(
ecaps={'capabilities': 1},
especs={'capabilities': '1'},
passes=True)
def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'opt1': 1, 'opt2': 2},
especs={'wrong_scope:opt1': '1',
'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
'trust:trusted_host': 'true'},
passes=True)
|
apache-2.0
|
mifl/android_kernel_pantech_oscar
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
littlstar/chromium.src
|
native_client_sdk/src/build_tools/sdk_tools/sdk_update_common.py
|
168
|
3249
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for sdk_update.py and sdk_update_main.py."""
import errno
import logging
import os
import shutil
import subprocess
import sys
import time
class Error(Exception):
"""Generic error/exception for sdk_update module"""
pass
def MakeDirs(directory):
if not os.path.exists(directory):
logging.info('Making directory %s' % (directory,))
os.makedirs(directory)
def RemoveDir(outdir):
"""Removes the given directory
On Unix systems, this just runs shutil.rmtree, but on Windows, this doesn't
work when the directory contains junctions (as does our SDK installer).
Therefore, on Windows, it runs rmdir /S /Q as a shell command. This always
does the right thing on Windows. If the directory already didn't exist,
RemoveDir will return successfully without taking any action.
Args:
outdir: The directory to delete
Raises:
Error - If this operation fails for any reason.
"""
max_tries = 5
last_exception = None
for num_tries in xrange(max_tries):
try:
shutil.rmtree(outdir)
return
except OSError as e:
if not os.path.exists(outdir):
# The directory can't be removed because it doesn't exist.
return
last_exception = e
# On Windows this could be an issue with junctions, so try again with
# rmdir.
if sys.platform == 'win32':
try:
cmd = ['rmdir', '/S', '/Q', outdir]
process = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
_, stderr = process.communicate()
if process.returncode != 0:
raise Error('\"%s\" failed with code %d. Output:\n %s' % (
' '.join(cmd), process.returncode, stderr))
return
# Ignore failures, we'll just try again.
except subprocess.CalledProcessError as e:
# CalledProcessError has no error message, generate one.
last_exception = Error('\"%s\" failed with code %d.' % (
' '.join(e.cmd), e.returncode))
except Error as e:
last_exception = e
# Didn't work, sleep and try again.
time.sleep(num_tries + 1)
# Failed.
raise Error('Unable to remove directory "%s"\n %s' % (outdir,
last_exception))
def RenameDir(srcdir, destdir):
"""Renames srcdir to destdir. Removes destdir before doing the
rename if it already exists."""
max_tries = 5
num_tries = 0
for num_tries in xrange(max_tries):
try:
RemoveDir(destdir)
shutil.move(srcdir, destdir)
return
except OSError as err:
if err.errno != errno.EACCES:
raise err
# If we are here, we didn't exit due to raised exception, so we are
# handling a Windows flaky access error. Sleep one second and try
# again.
time.sleep(num_tries + 1)
# end of while loop -- could not RenameDir
raise Error('Could not RenameDir %s => %s after %d tries.\n'
'Please check that no shells or applications '
'are accessing files in %s.'
% (srcdir, destdir, num_tries + 1, destdir))
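# --- Illustrative usage sketch (not part of the original Chromium file) ---
# A minimal example, using made-up directory names, of how MakeDirs and
# RenameDir are meant to be combined: stage into a scratch directory, then
# swap it into place, relying on the retry behaviour documented above.
if __name__ == '__main__':
  staging_dir = 'sdk_staging'  # hypothetical scratch directory
  install_dir = 'sdk_install'  # hypothetical install location
  MakeDirs(staging_dir)
  try:
    RenameDir(staging_dir, install_dir)
  except Error as e:
    logging.error('Install failed: %s', e)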
|
bsd-3-clause
|
5hubh4m/CS231n
|
Assignment3/cs231n/im2col.py
|
53
|
2090
|
import numpy as np
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
  assert (W + 2 * padding - field_width) % stride == 0
  out_height = (H + 2 * padding - field_height) // stride + 1
  out_width = (W + 2 * padding - field_width) // stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
stride=1):
""" An implementation of col2im based on fancy indexing and np.add.at """
N, C, H, W = x_shape
H_padded, W_padded = H + 2 * padding, W + 2 * padding
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
stride)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
if padding == 0:
return x_padded
return x_padded[:, :, padding:-padding, padding:-padding]
pass
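# --- Illustrative usage sketch (not part of the original cs231n handout) ---
# A small self-check showing the shapes produced by im2col_indices and
# col2im_indices; the input sizes below are arbitrary examples.
if __name__ == '__main__':
  x = np.random.randn(2, 3, 5, 5)  # (N, C, H, W)
  cols = im2col_indices(x, field_height=3, field_width=3, padding=1, stride=1)
  print(cols.shape)  # (3 * 3 * 3, 2 * 5 * 5) -> (27, 50)
  x_rec = col2im_indices(cols, x.shape, field_height=3, field_width=3,
                         padding=1, stride=1)
  print(x_rec.shape)  # back to (2, 3, 5, 5)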
|
mit
|
vjmac15/Lyilis
|
lib/nacl/signing (VJ Washington's conflicted copy 2017-08-29).py
|
17
|
6661
|
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import six
from nacl import encoding
import nacl.bindings
from nacl.public import (PrivateKey as _Curve25519_PrivateKey,
PublicKey as _Curve25519_PublicKey)
from nacl.utils import StringFixer, random
class SignedMessage(six.binary_type):
"""
    A bytes subclass that holds a message that has been signed by a
:class:`SigningKey`.
"""
@classmethod
def _from_parts(cls, signature, message, combined):
obj = cls(combined)
obj._signature = signature
obj._message = message
return obj
@property
def signature(self):
"""
The signature contained within the :class:`SignedMessage`.
"""
return self._signature
@property
def message(self):
"""
The message contained within the :class:`SignedMessage`.
"""
return self._message
class VerifyKey(encoding.Encodable, StringFixer, object):
"""
The public key counterpart to an Ed25519 SigningKey for producing digital
signatures.
:param key: [:class:`bytes`] Serialized Ed25519 public key
:param encoder: A class that is able to decode the `key`
"""
def __init__(self, key, encoder=encoding.RawEncoder):
# Decode the key
key = encoder.decode(key)
if not isinstance(key, bytes):
raise TypeError("VerifyKey must be created from 32 bytes")
if len(key) != nacl.bindings.crypto_sign_PUBLICKEYBYTES:
raise ValueError(
"The key must be exactly %s bytes long" %
nacl.bindings.crypto_sign_PUBLICKEYBYTES,
)
self._key = key
def __bytes__(self):
return self._key
def verify(self, smessage, signature=None, encoder=encoding.RawEncoder):
"""
Verifies the signature of a signed message, returning the message
if it has not been tampered with else raising
:class:`~nacl.signing.BadSignatureError`.
        :param smessage: [:class:`bytes`] Either the original message or a
            signature and message concatenated together.
:param signature: [:class:`bytes`] If an unsigned message is given for
smessage then the detached signature must be provided.
:param encoder: A class that is able to decode the secret message and
signature.
:rtype: :class:`bytes`
"""
if signature is not None:
# If we were given the message and signature separately, combine
# them.
smessage = signature + smessage
# Decode the signed message
smessage = encoder.decode(smessage)
return nacl.bindings.crypto_sign_open(smessage, self._key)
def to_curve25519_public_key(self):
"""
Converts a :class:`~nacl.signing.VerifyKey` to a
:class:`~nacl.public.PublicKey`
:rtype: :class:`~nacl.public.PublicKey`
"""
raw_pk = nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(self._key)
return _Curve25519_PublicKey(raw_pk)
class SigningKey(encoding.Encodable, StringFixer, object):
"""
Private key for producing digital signatures using the Ed25519 algorithm.
Signing keys are produced from a 32-byte (256-bit) random seed value. This
value can be passed into the :class:`~nacl.signing.SigningKey` as a
:func:`bytes` whose length is 32.
.. warning:: This **must** be protected and remain secret. Anyone who knows
        the value of your :class:`~nacl.signing.SigningKey` or its seed can
masquerade as you.
:param seed: [:class:`bytes`] Random 32-byte value (i.e. private key)
:param encoder: A class that is able to decode the seed
:ivar: verify_key: [:class:`~nacl.signing.VerifyKey`] The verify
(i.e. public) key that corresponds with this signing key.
"""
def __init__(self, seed, encoder=encoding.RawEncoder):
# Decode the seed
seed = encoder.decode(seed)
if not isinstance(seed, bytes):
raise TypeError("SigningKey must be created from a 32 byte seed")
# Verify that our seed is the proper size
if len(seed) != nacl.bindings.crypto_sign_SEEDBYTES:
raise ValueError(
"The seed must be exactly %d bytes long" %
nacl.bindings.crypto_sign_SEEDBYTES
)
public_key, secret_key = nacl.bindings.crypto_sign_seed_keypair(seed)
self._seed = seed
self._signing_key = secret_key
self.verify_key = VerifyKey(public_key)
def __bytes__(self):
return self._seed
@classmethod
def generate(cls):
"""
        Generates a random :class:`~nacl.signing.SigningKey` object.
:rtype: :class:`~nacl.signing.SigningKey`
"""
return cls(
random(nacl.bindings.crypto_sign_SEEDBYTES),
encoder=encoding.RawEncoder,
)
def sign(self, message, encoder=encoding.RawEncoder):
"""
Sign a message using this key.
:param message: [:class:`bytes`] The data to be signed.
:param encoder: A class that is used to encode the signed message.
:rtype: :class:`~nacl.signing.SignedMessage`
"""
raw_signed = nacl.bindings.crypto_sign(message, self._signing_key)
crypto_sign_BYTES = nacl.bindings.crypto_sign_BYTES
signature = encoder.encode(raw_signed[:crypto_sign_BYTES])
message = encoder.encode(raw_signed[crypto_sign_BYTES:])
signed = encoder.encode(raw_signed)
return SignedMessage._from_parts(signature, message, signed)
def to_curve25519_private_key(self):
"""
Converts a :class:`~nacl.signing.SigningKey` to a
:class:`~nacl.public.PrivateKey`
:rtype: :class:`~nacl.public.PrivateKey`
"""
sk = self._signing_key
raw_private = nacl.bindings.crypto_sign_ed25519_sk_to_curve25519(sk)
return _Curve25519_PrivateKey(raw_private)
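# --- Illustrative usage sketch (not part of the original PyNaCl source) ---
# A minimal example of the sign/verify round trip described in the docstrings
# above: generate a signing key, sign a message, then check it with the
# corresponding verify key.
if __name__ == '__main__':
    signing_key = SigningKey.generate()
    signed = signing_key.sign(b"example message")
    # verify() returns the original message when the signature is valid and
    # raises BadSignatureError otherwise.
    assert signing_key.verify_key.verify(signed) == b"example message"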
|
gpl-3.0
|
marc-sensenich/ansible
|
lib/ansible/modules/cloud/google/gcp_redis_instance_facts.py
|
9
|
7546
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_redis_instance_facts
description:
- Gather facts for GCP Instance
short_description: Gather facts for GCP Instance
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
region:
description:
- The name of the Redis region of the instance.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a instance facts
gcp_redis_instance_facts:
region: us-central1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
alternativeLocationId:
description:
- Only applicable to STANDARD_HA tier which protects the instance against zonal
failures by provisioning it across two zones.
- If provided, it must be a different zone from the one provided in [locationId].
returned: success
type: str
authorizedNetwork:
description:
- The full name of the Google Compute Engine network to which the instance is
connected. If left unspecified, the default network will be used.
returned: success
type: str
createTime:
description:
- The time the instance was created in RFC3339 UTC "Zulu" format, accurate to
nanoseconds.
returned: success
type: str
currentLocationId:
description:
- The current zone where the Redis endpoint is placed.
- For Basic Tier instances, this will always be the same as the [locationId]
provided by the user at creation time. For Standard Tier instances, this can
be either [locationId] or [alternativeLocationId] and can change after a failover
event.
returned: success
type: str
displayName:
description:
- An arbitrary and optional user-provided name for the instance.
returned: success
type: str
host:
description:
- Hostname or IP address of the exposed Redis endpoint used by clients to connect
to the service.
returned: success
type: str
labels:
description:
- Resource labels to represent user provided metadata.
returned: success
type: dict
redisConfigs:
description:
- Redis configuration parameters, according to U(http://redis.io/topics/config.)
- 'Please check Memorystore documentation for the list of supported parameters:
U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs)
.'
returned: success
type: dict
locationId:
description:
- The zone where the instance will be provisioned. If not provided, the service
will choose a zone for the instance. For STANDARD_HA tier, instances will
be created across two zones for protection against zonal failures. If [alternativeLocationId]
is also provided, it must be different from [locationId].
returned: success
type: str
name:
description:
- The ID of the instance or a fully qualified identifier for the instance. .
returned: success
type: str
memorySizeGb:
description:
- Redis memory size in GiB.
returned: success
type: int
port:
description:
- The port number of the exposed Redis endpoint.
returned: success
type: int
redisVersion:
description:
- The version of Redis software. If not provided, latest supported version will
be used. Updating the version will perform an upgrade/downgrade to the new
version. Currently, the supported values are REDIS_3_2 for Redis 3.2.
returned: success
type: str
reservedIpRange:
description:
- The CIDR range of internal addresses that are reserved for this instance.
If not provided, the service will choose an unused /29 block, for example,
10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with
existing subnets in an authorized network.
returned: success
type: str
tier:
description:
- 'The service tier of the instance. Must be one of these values: - BASIC: standalone
instance - STANDARD_HA: highly available primary/replica instances .'
returned: success
type: str
region:
description:
- The name of the Redis region of the instance.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(region=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
items = fetch_list(module, collection(module))
if items.get('instances'):
items = items.get('instances')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'redis')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
gpl-3.0
|
mrquim/repository.mrquim
|
plugin.video.neptune-1.2.2/resources/lib/modules/realdebrid.py
|
5
|
15187
|
import util
import threading, time, json, os, ast
import xbmc, xbmcplugin, xbmcaddon, xbmcgui
import urllib, urllib2, cookielib
import requests, string
client_id="MN55HGIQEO2BE" #realdebrid clientid
# reset realdebrid, for testing
"""xbmcaddon.Addon().setSetting('rd_id', "")
xbmcaddon.Addon().setSetting('rd_secret', "")
xbmcaddon.Addon().setSetting('rd_access', "")
xbmcaddon.Addon().setSetting('rd_refresh', "")"""
urlopen = urllib2.urlopen
cj = cookielib.LWPCookieJar()
Request = urllib2.Request
ADDON_ID='plugin.video.neptune'
addon=xbmcaddon.Addon(id=ADDON_ID)
home=xbmc.translatePath(addon.getAddonInfo('path').decode('utf-8'))
def checkDetails():
if xbmcaddon.Addon().getSetting('rd_id')=="" or xbmcaddon.Addon().getSetting('rd_secret')=="" or xbmcaddon.Addon().getSetting('rd_access')=="" or xbmcaddon.Addon().getSetting('rd_refresh')=="":
ok = xbmcgui.Dialog().yesno("RealDebrid not configured", "You have not configured RealDebrid, you cannot proceed without doing this. Do you want to do this now?")
if ok:
return auth()
else:
return False
else:
refreshToken()
return True
def auth():
xbmc.executebuiltin('ActivateWindow(10138)')
authData=util.getURL("https://api.real-debrid.com/oauth/v2/device/code?client_id="+client_id+"&new_credentials=yes")
authThread=threading.Thread(target=verifyThread, args=(authData,))
authThread.start()
def verifyThread(authData):
xbmc.executebuiltin('Dialog.Close(10138)')
# convert string to JSON
authJSON=json.loads(authData)
# create dialog with progress to show information
authMsg="To authorise your RealDebrid account, use a browser to browse to [B]"+authJSON['verification_url']+"[/B] and enter the verification code [B]"+authJSON['user_code']+"[/B]"
authDialog=util.progressStart("RealDebrid Authentication", authMsg)
authorised=False
timer=0
credJSON=""
while not authorised:
time.sleep(2)
timer=timer+2
util.progressUpdate(authDialog, timer, authMsg)
# check if we need to exit
if util.progressCancelled(authDialog)==True:
util.progressStop(authDialog)
break
if timer==100:
util.progressStop(authDialog)
util.alert("RealDebrid aithentication has timed out. Please try again.")
break
# all good to carry on lets check auth
credentials=util.getURL("https://api.real-debrid.com/oauth/v2/device/credentials?client_id="+client_id+"&code="+authJSON['device_code'])
if credentials!=False:
try:
if "error" in credentials:
util.logError(credentials)
else:
credJSON=json.loads(credentials)
#store credentials in settings
xbmcaddon.Addon().setSetting('rd_id', credJSON['client_id'])
xbmcaddon.Addon().setSetting('rd_secret', credJSON['client_secret'])
cj_rd = cookielib.CookieJar()
opener_rd = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj_rd))
data_rd = urllib.urlencode({'client_id' : credJSON['client_id'], 'client_secret' : credJSON['client_secret'], 'code': authJSON['device_code'], 'grant_type' : 'http://oauth.net/grant_type/device/1.0'})
try:
#util.logError(str(data_rd))
resp = opener_rd.open('https://api.real-debrid.com/oauth/v2/token', data_rd)
content=resp.read()
credJSON=json.loads(content)
xbmcaddon.Addon().setSetting('rd_access', credJSON['access_token'])
xbmcaddon.Addon().setSetting('rd_refresh', credJSON['refresh_token'])
authorised=True
except Exception as e:
util.logError(str(e))
except Exception as e:
util.logError(str(e))
# check how we exited loop
util.progressStop(authDialog)
if authorised==True:
util.alert("RealDebrid authenticated.")
return True
else:
util.alert("There was an error authenticating with RealDebrid")
return False
def refreshToken():
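    # Exchange the stored refresh token for a new access token via the same
    # OAuth device-flow token endpoint, then persist both tokens back to the
    # addon settings so subsequent API calls stay authorised.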
cj_rd = cookielib.CookieJar()
opener_rd = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj_rd))
data_rd = urllib.urlencode({'client_id' : xbmcaddon.Addon().getSetting('rd_id'), 'client_secret' : xbmcaddon.Addon().getSetting('rd_secret'), 'code': xbmcaddon.Addon().getSetting('rd_refresh'), 'grant_type' : 'http://oauth.net/grant_type/device/1.0'})
try:
resp = opener_rd.open('https://api.real-debrid.com/oauth/v2/token', data_rd)
content=resp.read()
credJSON=json.loads(content)
xbmcaddon.Addon().setSetting('rd_access', credJSON['access_token'])
xbmcaddon.Addon().setSetting('rd_refresh', credJSON['refresh_token'])
#util.logError("write complete: "+str(credJSON))
#util.logError("checking values"+xbmcaddon.Addon().getSetting('rd_access')+" "+xbmcaddon.Addon().getSetting('rd_refresh'))
authorised=True
except Exception as e:
util.logError("Error Refreshing Token: "+str(e))
def hostStatus():
from collections import OrderedDict
cj_rd = cookielib.CookieJar()
opener_rd = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj_rd))
opener_rd.addheaders=[("Authorization", "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access')))]
error=True
attempts=0
while error:
try:
resp = opener_rd.open('https://api.real-debrid.com/rest/1.0/hosts/status')
content=resp.read()
credJSON=json.loads(content)
#util.logError(str(credJSON))
return credJSON
except Exception as e:
e=str(e)
util.logError("hoststaus error: "+e)
attempts=attempts+1
if attempts>3:
error=True
return False
elif "Unauthorized" in e:
refreshToken()
def unrestrict(parameters):
cj_rd = cookielib.CookieJar()
opener_rd = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj_rd))
opener_rd.addheaders=[("Authorization", "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access')))]
if 'url' in parameters:
link=parameters['url']
else:
link=util.searchDialog("Enter link to unrestrict")
if link:
data_rd = urllib.urlencode({'link' : link})
error=True
attempts=0
while error:
try:
resp = opener_rd.open('https://api.real-debrid.com/rest/1.0/unrestrict/link', data_rd)
content=resp.read()
credJSON=json.loads(content)
error=True
return credJSON
except Exception as e:
util.logError("realdebrid error: "+str(e))
attempts=attempts+1
if attempts>3:
error=True
break
elif "Unauthorized" in e:
refreshToken()
return False
def addTorrent(parameters, remove=False, all=False):
refreshToken()
if "torrent_file" not in parameters:
dialog = xbmcgui.Dialog()
link = dialog.browseSingle(1, 'Select .torrent file', 'files', '.torrent', False, False, 'special://masterprofile/script_data/Kodi Lyrics').decode('utf-8')
else:
link=parameters['torrent_file']
file=open(link, 'rb')
cont=8
while cont==8:
headers={"Authorization": "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access'))}
r = requests.put("https://api.real-debrid.com/rest/1.0/torrents/addTorrent", data=file, headers=headers)
content=json.loads(r.text)
cont=isError(content)
file.close()
try:
if remove:
os.remove(link)
except:
util.logError("Unable to remove file '"+link+"'")
if cont:
return False
else:
return torrentSelect(content['id'], all)
def addMagent(parameters):
refreshToken()
if 'link' not in parameters:
link=util.searchDialog("Enter magnet link")
else:
link=parameters['link']
if 'all' not in parameters:
all=False
else:
all=parameters['all']
headers={"Authorization": "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access'))}
r = requests.post("https://api.real-debrid.com/rest/1.0/torrents/addMagnet", data={"magnet":link}, headers=headers)
content=json.loads(r.text)
cont=isError(content)
if cont:
return False
else:
return torrentSelect(content['id'], all)
def torrentSelect(id, all):
tinfo=torrentsInfo(id)
if isinstance(tinfo, dict):
if all:
files=["all"]
else:
files=[]
for file in tinfo['files']:
files.append(file['path'])
dialog = xbmcgui.Dialog()
ret = dialog.multiselect("Select files you want to download", files)
if ret:
ret=map(lambda x: x + 1, ret)
ret= map(str, ret)
headers={"Authorization": "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access'))}
r = requests.post('https://api.real-debrid.com/rest/1.0/torrents/selectFiles/'+id, data={'files' : ",".join(ret)}, headers=headers)
return id
else:
torrentsDelete(id)
return False
else:
return False
def torrentsInfo(id):
cj_rd = cookielib.CookieJar()
opener_rd = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj_rd))
cont=8
while cont==8:
opener_rd.addheaders=[("Authorization", "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access')))]
resp = opener_rd.open("https://api.real-debrid.com/rest/1.0/torrents/info/"+str(id))
content=json.loads(resp.read())
cont=isError(content)
if cont:
return False
else:
return content
def torrentsDelete(id):
refreshToken()
headers={"Authorization": "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access'))}
r = requests.delete("https://api.real-debrid.com/rest/1.0/torrents/delete/"+str(id), headers=headers)
if r.status_code==404:
util.alert("Unable to delete torrent, permission denied.")
return False
elif r.status_code==403:
util.alert("Unable to delete torrent, torrent not found.")
return False
elif r.status_code==401:
util.alert("Unable to delete torrent.")
return False
return True
def isError(toCheck):
try:
if toCheck['error']:
if toCheck['error_code']==8:
# need to refresh token
refreshToken()
return 8
else:
util.alert("Error "+str(toCheck['error_code'])+": "+string.capwords(toCheck['error'].replace("_", " ")))
util.logError("Error "+str(toCheck['error_code'])+": "+toCheck['error'])
return True
except:
return False
def downloads(parameters):
refreshToken()
headers={"Authorization": "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access'))}
extras=ast.literal_eval(parameters['extras'])
data={"offset":extras['offset'], "limit":extras['limit']}
r = requests.get("https://api.real-debrid.com/rest/1.0/downloads", data=data, headers=headers)
links=json.loads(r.text)
menu=[]
for item in links:
menu.append({
"title": item['filename'],
"url": item['download'],
"mode":10,
"poster":os.path.join(home, '', 'icon.png'),
"icon":os.path.join(home, '', 'icon.png'),
"fanart":os.path.join(home, '', 'fanart.jpg'),
"type":"video",
"plot":item['host'],
"isFolder":False,
"playable":False,
"method":"downloads",
"id":item['id']
})
util.addMenuItems(menu)
def torrents(parameters):
refreshToken()
headers={"Authorization": "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access'))}
extras=ast.literal_eval(parameters['extras'])
data={"offset":extras['offset'], "limit":extras['limit'], "filter": "active"}
r = requests.get("https://api.real-debrid.com/rest/1.0/torrents", data=data, headers=headers)
links=json.loads(r.text)
#util.logError(str(links))
menu=[]
for item in links:
if item['status'] == "downloaded":
#util.logError(str(torrentsInfo(item['id'])))
name=item['filename']
url=item['links'][0]
mode=5
elif item['status']== "downloading":
name="[Downloading "+str(item['progress'])+"%] "+item['filename']
url=""
mode=""
else:
name="["+item['status']+"] "+item['filename']
url=""
mode=""
util.logError("..>"+name)
menu.append({
"title": name,
"url": url,
"mode": mode,
"poster":os.path.join(home, '', 'icon.png'),
"icon":os.path.join(home, '', 'icon.png'),
"fanart":os.path.join(home, '', 'fanart.jpg'),
"type":"video",
"plot":item['host'],
"method":"torrent",
"id":item['id'],
"isFolder":False,
"playable":False,
"download":True
})
util.addMenuItems(menu)
def delID(parameters):
util.logError(str(parameters))
refreshToken()
headers={"Authorization": "Bearer "+str(xbmcaddon.Addon().getSetting('rd_access'))}
if parameters['method']=="torrent":
if xbmcgui.Dialog().yesno("Delete torrent?", line1="Do you want to delete the torret", line3=parameters['name'].encode('utf-8')):
r = requests.delete("https://api.real-debrid.com/rest/1.0/torrents/delete/"+parameters['id'], headers=headers)
try:
isError(json.loads(r.text))
except:
xbmc.executebuiltin('Container.Refresh')
else:
if xbmcgui.Dialog().yesno("Delete link?", line1="Do you want to delete the link", line3=parameters['name'].encode('utf-8')):
util.logError("https://api.real-debrid.com/rest/1.0/downloads/delete/"+parameters['id'])
r = requests.delete("https://api.real-debrid.com/rest/1.0/downloads/delete/"+parameters['id'], headers=headers)
try:
isError(json.loads(r.text))
except:
xbmc.executebuiltin('Container.Refresh')
|
gpl-2.0
|
npe9/depot_tools
|
gsutil.py
|
11
|
3665
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run a pinned gsutil."""
import argparse
import base64
import hashlib
import json
import os
import shutil
import subprocess
import sys
import urllib2
import zipfile
GSUTIL_URL = 'https://storage.googleapis.com/pub/'
API_URL = 'https://www.googleapis.com/storage/v1/b/pub/o/'
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_BIN_DIR = os.path.join(THIS_DIR, 'external_bin', 'gsutil')
DEFAULT_FALLBACK_GSUTIL = os.path.join(
THIS_DIR, 'third_party', 'gsutil', 'gsutil')
class InvalidGsutilError(Exception):
pass
def download_gsutil(version, target_dir):
"""Downloads gsutil into the target_dir."""
filename = 'gsutil_%s.zip' % version
target_filename = os.path.join(target_dir, filename)
# Check if the target exists already.
if os.path.exists(target_filename):
md5_calc = hashlib.md5()
with open(target_filename, 'rb') as f:
while True:
buf = f.read(4096)
if not buf:
break
md5_calc.update(buf)
local_md5 = md5_calc.hexdigest()
metadata_url = '%s%s' % (API_URL, filename)
metadata = json.load(urllib2.urlopen(metadata_url))
remote_md5 = base64.b64decode(metadata['md5Hash'])
if local_md5 == remote_md5:
return target_filename
os.remove(target_filename)
# Do the download.
url = '%s%s' % (GSUTIL_URL, filename)
u = urllib2.urlopen(url)
with open(target_filename, 'wb') as f:
while True:
buf = u.read(4096)
if not buf:
break
f.write(buf)
return target_filename
def check_gsutil(gsutil_bin):
"""Run gsutil version and make sure it runs."""
return subprocess.call(
[sys.executable, gsutil_bin, 'version'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0
def ensure_gsutil(version, target):
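  """Ensures gsutil `version` is unpacked under `target`, downloading and
  extracting the pinned zip into a local cache if needed, and returns the
  path to the unpacked gsutil binary."""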
bin_dir = os.path.join(target, 'gsutil_%s' % version)
gsutil_bin = os.path.join(bin_dir, 'gsutil', 'gsutil')
if os.path.isfile(gsutil_bin) and check_gsutil(gsutil_bin):
# Everything is awesome! we're all done here.
return gsutil_bin
if os.path.isdir(bin_dir):
# Clean up if we're redownloading a corrupted gsutil.
shutil.rmtree(bin_dir)
cache_dir = os.path.join(target, '.cache_dir')
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
target_zip_filename = download_gsutil(version, cache_dir)
with zipfile.ZipFile(target_zip_filename, 'r') as target_zip:
target_zip.extractall(bin_dir)
# Final check that the gsutil bin is okay. This should never fail.
if not check_gsutil(gsutil_bin):
raise InvalidGsutilError()
return gsutil_bin
def run_gsutil(force_version, fallback, target, args):
if force_version:
gsutil_bin = ensure_gsutil(force_version, target)
else:
gsutil_bin = fallback
cmd = [sys.executable, gsutil_bin] + args
return subprocess.call(cmd)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--force-version')
parser.add_argument('--fallback', default=DEFAULT_FALLBACK_GSUTIL)
parser.add_argument('--target', default=DEFAULT_BIN_DIR)
parser.add_argument('args', nargs=argparse.REMAINDER)
args, extras = parser.parse_known_args()
if args.args and args.args[0] == '--':
args.args.pop(0)
if extras:
args.args = extras + args.args
return args.force_version, args.fallback, args.target, args.args
def main():
force_version, fallback, target, args = parse_args()
return run_gsutil(force_version, fallback, target, args)
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
jodal/pykka
|
tests/test_registry.py
|
1
|
5406
|
import pytest
from pykka import ActorRegistry
pytestmark = pytest.mark.usefixtures("stop_all")
class ActorBase:
received_messages = None
def __init__(self):
super().__init__()
self.received_messages = []
def on_receive(self, message):
self.received_messages.append(message)
@pytest.fixture(scope="module")
def actor_a_class(runtime):
class ActorA(ActorBase, runtime.actor_class):
pass
return ActorA
@pytest.fixture(scope="module")
def actor_b_class(runtime):
class ActorB(ActorBase, runtime.actor_class):
pass
return ActorB
@pytest.fixture
def actor_ref(actor_a_class):
return actor_a_class.start()
@pytest.fixture
def a_actor_refs(actor_a_class):
return [actor_a_class.start() for _ in range(3)]
@pytest.fixture
def b_actor_refs(actor_b_class):
return [actor_b_class.start() for _ in range(5)]
def test_actor_is_registered_when_started(actor_ref):
assert actor_ref in ActorRegistry.get_all()
def test_actor_is_unregistered_when_stopped(actor_ref):
assert actor_ref in ActorRegistry.get_all()
actor_ref.stop()
assert actor_ref not in ActorRegistry.get_all()
def test_actor_may_be_registered_manually(actor_ref):
ActorRegistry.unregister(actor_ref)
assert actor_ref not in ActorRegistry.get_all()
ActorRegistry.register(actor_ref)
assert actor_ref in ActorRegistry.get_all()
def test_actor_may_be_unregistered_multiple_times_without_error(actor_ref):
ActorRegistry.unregister(actor_ref)
assert actor_ref not in ActorRegistry.get_all()
ActorRegistry.unregister(actor_ref)
assert actor_ref not in ActorRegistry.get_all()
ActorRegistry.register(actor_ref)
assert actor_ref in ActorRegistry.get_all()
def test_all_actors_can_be_stopped_through_registry(a_actor_refs, b_actor_refs):
assert len(ActorRegistry.get_all()) == 8
ActorRegistry.stop_all(block=True)
assert len(ActorRegistry.get_all()) == 0
def test_stop_all_stops_last_started_actor_first_if_blocking(mocker):
mocker.patch.object(ActorRegistry, "get_all")
stopped_actors = []
started_actors = [mocker.Mock(name=i) for i in range(3)]
started_actors[0].stop.side_effect = lambda *a, **kw: stopped_actors.append(
started_actors[0]
)
started_actors[1].stop.side_effect = lambda *a, **kw: stopped_actors.append(
started_actors[1]
)
started_actors[2].stop.side_effect = lambda *a, **kw: stopped_actors.append(
started_actors[2]
)
ActorRegistry.get_all.return_value = started_actors
ActorRegistry.stop_all(block=True)
assert stopped_actors[0] == started_actors[2]
assert stopped_actors[1] == started_actors[1]
assert stopped_actors[2] == started_actors[0]
def test_actors_may_be_looked_up_by_class(actor_a_class, a_actor_refs, b_actor_refs):
result = ActorRegistry.get_by_class(actor_a_class)
for a_actor in a_actor_refs:
assert a_actor in result
for b_actor in b_actor_refs:
assert b_actor not in result
def test_actors_may_be_looked_up_by_superclass(
actor_a_class, a_actor_refs, b_actor_refs
):
result = ActorRegistry.get_by_class(actor_a_class)
for a_actor in a_actor_refs:
assert a_actor in result
for b_actor in b_actor_refs:
assert b_actor not in result
def test_actors_may_be_looked_up_by_class_name(
actor_a_class, a_actor_refs, b_actor_refs
):
result = ActorRegistry.get_by_class_name("ActorA")
for a_actor in a_actor_refs:
assert a_actor in result
for b_actor in b_actor_refs:
assert b_actor not in result
def test_actors_may_be_looked_up_by_urn(actor_ref):
result = ActorRegistry.get_by_urn(actor_ref.actor_urn)
assert result == actor_ref
def test_get_by_urn_returns_none_if_not_found():
result = ActorRegistry.get_by_urn("urn:foo:bar")
assert result is None
def test_broadcast_sends_message_to_all_actors_if_no_target(a_actor_refs, b_actor_refs):
ActorRegistry.broadcast({"command": "foo"})
running_actors = ActorRegistry.get_all()
assert running_actors
for actor_ref in running_actors:
received_messages = actor_ref.proxy().received_messages.get()
assert {"command": "foo"} in received_messages
def test_broadcast_sends_message_to_all_actors_of_given_class(
actor_a_class, actor_b_class
):
ActorRegistry.broadcast({"command": "foo"}, target_class=actor_a_class)
for actor_ref in ActorRegistry.get_by_class(actor_a_class):
received_messages = actor_ref.proxy().received_messages.get()
assert {"command": "foo"} in received_messages
for actor_ref in ActorRegistry.get_by_class(actor_b_class):
received_messages = actor_ref.proxy().received_messages.get()
assert {"command": "foo"} not in received_messages
def test_broadcast_sends_message_to_all_actors_of_given_class_name(
actor_a_class, actor_b_class
):
ActorRegistry.broadcast({"command": "foo"}, target_class="ActorA")
for actor_ref in ActorRegistry.get_by_class(actor_a_class):
received_messages = actor_ref.proxy().received_messages.get()
assert {"command": "foo"} in received_messages
for actor_ref in ActorRegistry.get_by_class(actor_b_class):
received_messages = actor_ref.proxy().received_messages.get()
assert {"command": "foo"} not in received_messages
|
apache-2.0
|
Amechi101/concepteur-market-app
|
venv/lib/python2.7/site-packages/django/templatetags/future.py
|
130
|
1640
|
from django.template import Library
from django.template import defaulttags
register = Library()
@register.tag
def ssi(parser, token):
# Used for deprecation path during 1.3/1.4, will be removed in 2.0
return defaulttags.ssi(parser, token)
@register.tag
def url(parser, token):
# Used for deprecation path during 1.3/1.4, will be removed in 2.0
return defaulttags.url(parser, token)
@register.tag
def cycle(parser, token):
"""
This is the future version of `cycle` with auto-escaping.
By default all strings are escaped.
If you want to disable auto-escaping of variables you can use::
{% autoescape off %}
{% cycle var1 var2 var3 as somecycle %}
{% autoescape %}
Or if only some variables should be escaped, you can use::
{% cycle var1 var2|safe var3|safe as somecycle %}
"""
return defaulttags.cycle(parser, token, escape=True)
@register.tag
def firstof(parser, token):
"""
This is the future version of `firstof` with auto-escaping.
This is equivalent to::
{% if var1 %}
{{ var1 }}
{% elif var2 %}
{{ var2 }}
{% elif var3 %}
{{ var3 }}
{% endif %}
If you want to disable auto-escaping of variables you can use::
{% autoescape off %}
{% firstof var1 var2 var3 "<strong>fallback value</strong>" %}
{% autoescape %}
Or if only some variables should be escaped, you can use::
{% firstof var1 var2|safe var3 "<strong>fallback value</strong>"|safe %}
"""
return defaulttags.firstof(parser, token, escape=True)
|
mit
|
myarjunar/QGIS
|
python/plugins/processing/gui/ScriptEdit.py
|
1
|
7614
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptEdit.py
---------------------
Date : April 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QFont, QColor, QKeySequence
from qgis.PyQt.QtWidgets import QShortcut
from qgis.core import QgsApplication, QgsSettings
from qgis.PyQt.Qsci import QsciScintilla, QsciLexerPython, QsciAPIs
from processing.gui.LexerR import LexerR
class ScriptEdit(QsciScintilla):
LEXER_PYTHON = 0
LEXER_R = 1
def __init__(self, parent=None):
QsciScintilla.__init__(self, parent)
self.lexer = None
self.api = None
self.lexerType = -1
self.setCommonOptions()
self.initShortcuts()
def setCommonOptions(self):
# Enable non-ASCII characters
self.setUtf8(True)
# Default font
font = QFont()
font.setFamily('Courier')
font.setFixedPitch(True)
font.setPointSize(20)
self.setFont(font)
self.setMarginsFont(font)
self.initLexer()
self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
self.setWrapMode(QsciScintilla.WrapWord)
self.setWrapVisualFlags(QsciScintilla.WrapFlagByText,
QsciScintilla.WrapFlagNone, 4)
self.setSelectionForegroundColor(QColor('#2e3436'))
self.setSelectionBackgroundColor(QColor('#babdb6'))
# Show line numbers
self.setMarginWidth(1, '000')
self.setMarginLineNumbers(1, True)
self.setMarginsForegroundColor(QColor('#2e3436'))
self.setMarginsBackgroundColor(QColor('#babdb6'))
# Highlight current line
self.setCaretLineVisible(True)
self.setCaretLineBackgroundColor(QColor('#d3d7cf'))
# Folding
self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
self.setFoldMarginColors(QColor('#d3d7cf'), QColor('#d3d7cf'))
# Mark column 80 with vertical line
self.setEdgeMode(QsciScintilla.EdgeLine)
self.setEdgeColumn(80)
self.setEdgeColor(QColor('#eeeeec'))
# Indentation
self.setAutoIndent(True)
self.setIndentationsUseTabs(False)
self.setIndentationWidth(4)
self.setTabIndents(True)
self.setBackspaceUnindents(True)
self.setTabWidth(4)
        # Autocompletion
self.setAutoCompletionThreshold(2)
self.setAutoCompletionSource(QsciScintilla.AcsAPIs)
self.setFonts(10)
def setFonts(self, size):
# Load font from Python console settings
settings = QgsSettings()
fontName = settings.value('pythonConsole/fontfamilytext', 'Monospace')
fontSize = int(settings.value('pythonConsole/fontsize', size))
self.defaultFont = QFont(fontName)
self.defaultFont.setFixedPitch(True)
self.defaultFont.setPointSize(fontSize)
self.defaultFont.setStyleHint(QFont.TypeWriter)
self.defaultFont.setStretch(QFont.SemiCondensed)
self.defaultFont.setLetterSpacing(QFont.PercentageSpacing, 87.0)
self.defaultFont.setBold(False)
self.boldFont = QFont(self.defaultFont)
self.boldFont.setBold(True)
self.italicFont = QFont(self.defaultFont)
self.italicFont.setItalic(True)
self.setFont(self.defaultFont)
self.setMarginsFont(self.defaultFont)
def initShortcuts(self):
(ctrl, shift) = (self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16)
# Disable some shortcuts
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl +
shift)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl)
# self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Z") + ctrl)
# self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Y") + ctrl)
# Use Ctrl+Space for autocompletion
self.shortcutAutocomplete = QShortcut(QKeySequence(Qt.CTRL +
Qt.Key_Space), self)
self.shortcutAutocomplete.setContext(Qt.WidgetShortcut)
self.shortcutAutocomplete.activated.connect(self.autoComplete)
def autoComplete(self):
self.autoCompleteFromAll()
def setLexerType(self, lexerType):
self.lexerType = lexerType
self.initLexer()
def initLexer(self):
if self.lexerType == self.LEXER_PYTHON:
self.lexer = QsciLexerPython()
colorDefault = QColor('#2e3436')
colorComment = QColor('#c00')
colorCommentBlock = QColor('#3465a4')
colorNumber = QColor('#4e9a06')
colorType = QColor('#4e9a06')
colorKeyword = QColor('#204a87')
colorString = QColor('#ce5c00')
self.lexer.setDefaultFont(self.defaultFont)
self.lexer.setDefaultColor(colorDefault)
self.lexer.setColor(colorComment, 1)
self.lexer.setColor(colorNumber, 2)
self.lexer.setColor(colorString, 3)
self.lexer.setColor(colorString, 4)
self.lexer.setColor(colorKeyword, 5)
self.lexer.setColor(colorString, 6)
self.lexer.setColor(colorString, 7)
self.lexer.setColor(colorType, 8)
self.lexer.setColor(colorCommentBlock, 12)
self.lexer.setColor(colorString, 15)
self.lexer.setFont(self.italicFont, 1)
self.lexer.setFont(self.boldFont, 5)
self.lexer.setFont(self.boldFont, 8)
self.lexer.setFont(self.italicFont, 12)
self.api = QsciAPIs(self.lexer)
settings = QgsSettings()
useDefaultAPI = bool(settings.value('pythonConsole/preloadAPI',
True))
if useDefaultAPI:
# Load QGIS API shipped with Python console
self.api.loadPrepared(
os.path.join(QgsApplication.pkgDataPath(),
'python', 'qsci_apis', 'pyqgis.pap'))
else:
# Load user-defined API files
apiPaths = settings.value('pythonConsole/userAPI', [])
for path in apiPaths:
self.api.load(path)
self.api.prepare()
self.lexer.setAPIs(self.api)
elif self.lexerType == self.LEXER_R:
# R lexer
self.lexer = LexerR()
self.setLexer(self.lexer)
|
gpl-2.0
|
rockfruit/bika.lims
|
bika/lims/browser/viewlets.py
|
1
|
5596
|
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
import json
import urllib
import time
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone import api
from plone.app.layout.viewlets.common import ViewletBase
from zope.component import getMultiAdapter
from bika.lims import logger
class DocumentActionsViewlet(ViewletBase):
"""Overload the default to print pretty icons
"""
index = ViewPageTemplateFile("templates/document_actions.pt")
def render(self):
portal_factory = getToolByName(self.context, 'portal_factory')
if portal_factory.isTemporary(self.context):
return self.index()
self.actions = []
portal_actions = getToolByName(self.context, 'portal_actions')
actions = portal_actions.listFilteredActionsFor(self.context)
if 'document_actions' in actions:
for action in actions['document_actions']:
self.actions.append(action)
return self.index()
class NewVersionsViewlet(ViewletBase):
""" Handle notifications related to new version of Bika LIMS
1) Check pypi for new version
2) Check prefs to see if upgrade steps are required.
"""
index = ViewPageTemplateFile("templates/new_version.pt")
def get_versions(self):
"""Configure self.versions, a list of product versions
from portal.quickinstaller
"""
self.versions = {}
qi = self.context.portal_quickinstaller
for key in qi.keys():
self.versions[key] = qi.getProductVersion(key)
def check_new_version(self):
"""Look for new updates at pypi
"""
self.current_version = self.versions['bika.lims']
if not self.current_version:
self.has_new_version = False
return
url = "https://pypi.python.org/pypi/bika.lims/json"
try:
jsonstr = urllib.urlopen(url).read()
self.pypi = json.loads(jsonstr)
v = self.new_version = self.pypi['info']['version']
self.new_date = \
self.pypi['releases'][v][0]['upload_time'].split('T')[0]
except Exception as e:
logger.info("Failed to retrieve new version info: %s" % e)
v = self.current_version
self.new_date = ""
self.has_new_version = v > self.current_version
def check_new_upgrade_step(self):
"""Warn about upgrade steps that have not been run. This will override
the users choice in settings: un-executed upgrade steps are always
alerted.
"""
qi = self.context.portal_quickinstaller
self.info = qi.upgradeInfo('bika.lims')
if self.info['installedVersion'] < self.info['newVersion']:
self.has_upgrade_step = True
else:
self.has_upgrade_step = False
def check_session(self):
"""Return False if the session hint claims that we already checked.
Return True if the session has no record, or if more than one day has
passed since we last checked.
"""
et = time.time()
try:
sdm = self.context.session_data_manager
except AttributeError:
# While testing, the session data manager is not yet instantiated.
return False
session = sdm.getSessionData(create=True)
diff = et - session.get('bika.lims-version-check', et)
if diff > 86400 or diff == 0:
session['bika.lims-version-check'] = et
return True
else:
return False
def render(self):
if not self.check_session():
return ""
self.get_versions()
self.check_new_version()
self.check_new_upgrade_step()
mtool = getToolByName(self.context, 'portal_membership')
member = mtool.getAuthenticatedMember()
roles = member.getRoles()
allowed = 'LabManager' in roles or 'Manager' in roles
if allowed \
and self.context.bika_setup.getShowNewReleasesInfo() \
and self.has_new_version:
return self.index()
elif allowed and self.has_upgrade_step:
return self.index()
else:
return ""
class PathBarViewlet(ViewletBase):
index = ViewPageTemplateFile('templates/path_bar.pt')
def update(self):
super(PathBarViewlet, self).update()
self.is_rtl = self.portal_state.is_rtl()
breadcrumbs_view = getMultiAdapter((self.context, self.request),
name='breadcrumbs_view')
self.breadcrumbs = breadcrumbs_view.breadcrumbs()
class AuthenticatorViewlet(ViewletBase):
index = ViewPageTemplateFile('templates/authenticator.pt')
class ImportButtonViewlet(ViewletBase):
index = ViewPageTemplateFile('templates/import_button_viewlet.pt')
def changeStatus(self):
workflow = api.portal.get_tool("portal_workflow")
state = api.content.get_state(self.context)
available_transions = workflow.getTransitionsFor(self.context)
self.import_buttons = []
contextURL = self.context.absolute_url()
changeWorkFlow = 'content_status_modify?workflow_action='
for state in available_transions:
url = '%s/%s%s' % (contextURL, changeWorkFlow, state['id'])
self.import_buttons.append({'state': state['name'], 'url': url})
return self.import_buttons
|
agpl-3.0
|
tst-lsavoie/earthenterprise
|
earth_enterprise/src/google/protobuf-py/google/protobuf/internal/type_checkers.py
|
9
|
12112
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
  FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
    corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
"""Returns a type checker for a message field of the specified types.
Args:
cpp_type: C++ type of the field (see descriptor.py).
field_type: Protocol message field type (see descriptor.py).
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field_type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
return _VALUE_CHECKERS[cpp_type]
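# Illustrative sketch (not part of the original module): how GetTypeChecker is
# typically used by generated message code to validate a value before it is
# assigned to a scalar field.  The descriptor constants are real; the concrete
# values are only examples.
#
#   checker = GetTypeChecker(_FieldDescriptor.CPPTYPE_INT32,
#                            _FieldDescriptor.TYPE_INT32)
#   checker.CheckValue(42)      # accepted
#   checker.CheckValue("42")    # raises TypeError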
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (int, long)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int, long)))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):
"""Checker used for string fields."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (str, unicode)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (str, unicode)))
raise TypeError(message)
# If the value is of type 'str' make sure that it is in 7-bit ASCII
# encoding.
if isinstance(proposed_value, str):
try:
unicode(proposed_value, 'ascii')
except UnicodeDecodeError:
raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
'encoding. Non-ASCII strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
_FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
}
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
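# Illustrative sketch (not part of the original module): the serialization code
# looks up the sizing function for a field by its declared type, e.g. for an
# int32 field with field number 1 and value 150 (values chosen only as an
# example):
#
#   byte_size_fn = TYPE_TO_BYTE_SIZE_FN[_FieldDescriptor.TYPE_INT32]
#   n_bytes = byte_size_fn(1, 150)   # tag bytes plus the varint-encoded value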
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
|
apache-2.0
|
spnow/grr
|
lib/flows/general/services.py
|
2
|
2574
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Get running/installed services."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.proto import flows_pb2
from grr.proto import jobs_pb2
class ServiceInformation(rdfvalue.RDFProtoStruct):
protobuf = jobs_pb2.ServiceInformation
class EnumerateServicesArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.EnumerateServicesArgs
# TODO(user): Mostly replaced with WindowsDrivers artifact. Remove this
# flow once we can also do the binary download with artifacts.
class EnumerateServices(flow.GRRFlow):
  """Enumerate Windows services and kernel drivers using WMI.
Optionally also download the binaries automatically.
"""
category = "/Services/"
behaviours = flow.GRRFlow.behaviours + "Windows"
args_type = EnumerateServicesArgs
@flow.StateHandler(next_state=["StoreServices", "StoreWMIServices"])
def Start(self):
"""Setup output collections and issue WMI call."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
system = client.Get(client.Schema.SYSTEM)
# if system is None we'll try to run the flow anyway since it might work.
if system == "Windows":
self.CallClient("WmiQuery", query="Select * from Win32_SystemDriver",
next_state="StoreWMIServices")
else:
self.CallClient("EnumerateRunningServices", next_state="StoreServices")
@flow.StateHandler()
def StoreServices(self, responses):
"""Store services in ServiceCollection."""
if not responses.success:
raise flow.FlowError(str(responses.status))
for response in responses:
self.SendReply(response)
@flow.StateHandler(next_state="End")
  def StoreWMIServices(self, responses):
    """This stores the services."""
if not responses.success:
# If we failed with the wmi query we can not continue.
raise flow.FlowError("Error during WMI query %s" % responses.status)
paths = []
for response in responses:
service_entry = rdfvalue.ServiceInformation()
service_entry.wmi_information = response
service_entry.name = response.GetItem("Name")
service_entry.description = response.GetItem("Description")
service_entry.state = response.GetItem("State")
driver_path = response.GetItem("PathName")
if driver_path:
paths.append(driver_path)
self.SendReply(service_entry)
if paths:
self.CallFlow("FetchFiles", paths=paths, pathtype=self.args.pathtype,
next_state="End")
|
apache-2.0
|
fgesora/odoo
|
addons/l10n_be_coda/wizard/__init__.py
|
439
|
1098
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_coda_import
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/mpl_toolkits/axisartist/axis_artist.py
|
7
|
52735
|
"""
axis_artist.py module provides axis-related artists. They are
* axis line
* tick lines
* tick labels
* axis label
* grid lines
The main artist classes are AxisArtist and GridlinesCollection. The
GridlinesCollection is responsible for drawing grid lines, and the
AxisArtist is responsible for all other artists. The AxisArtist class
has attributes that are associated with each type of artist.
* line : axis line
* major_ticks : major tick lines
* major_ticklabels : major tick labels
* minor_ticks : minor tick lines
* minor_ticklabels : minor tick labels
* label : axis label
Typically, the AxisArtist associated with an axes will be accessed with
the *axis* dictionary of the axes, i.e., the AxisArtist for the bottom
axis is
ax.axis["bottom"]
where *ax* is an instance of axes (mpl_toolkits.axislines.Axes). Thus,
ax.axis["bottom"].line is an artist associated with the axis line, and
ax.axis["bottom"].major_ticks is an artist associated with the major tick
lines.
You can change the colors, fonts, line widths, etc. of these artists
by calling a suitable set method. For example, to change the color of the major
ticks of the bottom axis to red,
ax.axis["bottom"].major_ticks.set_color("r")
However, things like the locations of ticks and their ticklabels need
to be changed from the side of the grid_helper.
axis_direction
--------------
AxisArtist, AxisLabel, and TickLabels have an *axis_direction* attribute,
which adjusts the location, angle, etc. The *axis_direction* must be
one of [left, right, bottom, top], and they follow the matplotlib
convention for the rectangle axis.
For example, for the *bottom* axis (the left and right are relative to
the direction of the increasing coordinate),
* ticklabels and axislabel are on the right
* ticklabels and axislabel have text angle of 0
* ticklabels are baseline, center-aligned
* axislabel is top, center-aligned
The text angles are actually relative to (90 + angle of the direction
to the ticklabel), which gives 0 for bottom axis.
left bottom right top
ticklabels location left right right left
axislabel location left right right left
ticklabels angle 90 0 -90 180
axislabel angle 180 0 0 180
ticklabel va center baseline center baseline
axislabel va center top center bottom
ticklabel ha right center right center
axislabel ha right center right center
Ticks are by default drawn on the side directly opposite the ticklabels. To draw
ticks on the same side as the ticklabels,
ax.axis["bottom"].major_ticks.set_ticks_out(True)
The following attributes can be customized (use the set_xxx methods):
* Ticks : ticksize, tick_out
* TickLabels : pad
* AxisLabel : pad
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
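# Illustrative sketch (not part of the original module): a minimal example of
# the usage pattern described in the module docstring, assuming an axes created
# with the Subplot class from mpl_toolkits.axisartist (the figure setup is only
# an example).
#
#   from mpl_toolkits.axisartist import Subplot
#   import matplotlib.pyplot as plt
#
#   fig = plt.figure()
#   ax = Subplot(fig, 111)
#   fig.add_subplot(ax)
#   ax.axis["bottom"].major_ticks.set_color("r")       # recolor major ticks
#   ax.axis["bottom"].major_ticks.set_ticks_out(True)  # ticks on label side
#   ax.axis["left"].label.set_pad(10)                  # pad the axis label
#   plt.show()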
from matplotlib.externals import six
# FIXME :
# * : angles are given in data coordinate - need to convert it to canvas coordinate
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
class BezierPath(mlines.Line2D):
def __init__(self, path, *kl, **kw):
mlines.Line2D.__init__(self, [], [], *kl, **kw)
self._path = path
self._invalid = False
def recache(self):
self._transformed_path = TransformedPath(self._path, self.get_transform())
self._invalid = False
def set_path(self, path):
self._path = path
self._invalid = True
def draw(self, renderer):
if self._invalid:
self.recache()
if not self._visible: return
renderer.open_group('line2d')
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
lineFunc = getattr(self, funcname)
lineFunc(renderer, gc, tpath, affine.frozen())
gc.restore()
renderer.close_group('line2d')
class UnimplementedException(Exception):
pass
from matplotlib.artist import Artist
class AttributeCopier(object):
def __init__(self, ref_artist, klass=Artist):
self._klass = klass
self._ref_artist = ref_artist
super(AttributeCopier, self).__init__()
def set_ref_artist(self, artist):
self._ref_artist = artist
def get_ref_artist(self):
        raise RuntimeError("get_ref_artist must be overridden")
#return self._ref_artist
def get_attribute_from_ref_artist(self, attr_name, default_value):
get_attr_method_name = "get_"+attr_name
c = getattr(self._klass, get_attr_method_name)(self)
if c == 'auto':
ref_artist = self.get_ref_artist()
if ref_artist:
attr = getattr(ref_artist,
get_attr_method_name)()
return attr
else:
return default_value
return c
from matplotlib.lines import Line2D
class Ticks(Line2D, AttributeCopier):
"""
Ticks are derived from Line2D, and note that ticks themselves
are markers. Thus, you should use set_mec, set_mew, etc.
To change the tick size (length), you need to use
    set_ticksize. To change the direction of the ticks (ticks are
    in the opposite direction of the ticklabels by default), use
    set_tick_out(True).
"""
def __init__(self, ticksize, tick_out=False, **kwargs):
self._ticksize = ticksize
self.locs_angles_labels = []
self.set_tick_out(tick_out)
self._axis = kwargs.pop("axis", None)
if self._axis is not None:
if "color" not in kwargs:
kwargs["color"] = "auto"
if ("mew" not in kwargs) and ("markeredgewidth" not in kwargs):
kwargs["markeredgewidth"] = "auto"
Line2D.__init__(self, [0.], [0.], **kwargs)
AttributeCopier.__init__(self, self._axis, klass=Line2D)
self.set_snap(True)
def get_ref_artist(self):
#return self._ref_artist.get_ticklines()[0]
return self._ref_artist.majorTicks[0].tick1line
def get_color(self):
return self.get_attribute_from_ref_artist("color", "k")
def get_markeredgecolor(self):
if self._markeredgecolor == 'auto':
return self.get_color()
else:
return self._markeredgecolor
def get_markeredgewidth(self):
return self.get_attribute_from_ref_artist("markeredgewidth", .5)
def set_tick_out(self, b):
"""
        Set True if the tick needs to be rotated by 180 degrees.
"""
self._tick_out = b
def get_tick_out(self):
"""
        Return True if the tick will be rotated by 180 degrees.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
        Set the length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
        Return the length of the ticks in points.
"""
return self._ticksize
def set_locs_angles(self, locs_angles):
self.locs_angles = locs_angles
def _update(self, renderer):
pass
_tickvert_path = Path([[0., 0.], [1., 0.]])
def draw(self, renderer):
if not self.get_visible():
return
self._update(renderer) # update the tick
size = self._ticksize
path_trans = self.get_transform()
# set gc : copied from lines.py
# gc = renderer.new_gc()
# self._set_gc_clip(gc)
# gc.set_foreground(self.get_color())
# gc.set_antialiased(self._antialiased)
# gc.set_linewidth(self._linewidth)
# gc.set_alpha(self._alpha)
# if self.is_dashed():
# cap = self._dashcapstyle
# join = self._dashjoinstyle
# else:
# cap = self._solidcapstyle
# join = self._solidjoinstyle
# gc.set_joinstyle(join)
# gc.set_capstyle(cap)
# gc.set_snap(self.get_snap())
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self.get_markeredgewidth())
gc.set_alpha(self._alpha)
offset = renderer.points_to_pixels(size)
marker_scale = Affine2D().scale(offset, offset)
if self.get_tick_out():
add_angle = 180
else:
add_angle = 0
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
for loc, angle in self.locs_angles:
marker_rotation.rotate_deg(angle+add_angle)
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
Path(locs), path_trans.get_affine())
marker_rotation.clear()
gc.restore()
def test_ticks():
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ticks = Ticks(ticksize=10, axis=ax.xaxis)
ax.add_artist(ticks)
locs_angles = [((0.2, 0.), 90),
((0.4, 0.), 120)]
ticks.set_locs_angles(locs_angles)
plt.draw()
class LabelBase(mtext.Text):
"""
A base class for AxisLabel and TickLabels. The position and angle
    of the text are calculated from the offset_ref_angle,
text_ref_angle, and offset_radius attributes.
"""
def __init__(self, *kl, **kwargs):
self.locs_angles_labels = []
self._ref_angle = 0
self._offset_radius = 0.
super(LabelBase, self).__init__(*kl,
**kwargs)
self.set_rotation_mode("anchor")
self._text_follow_ref_angle = True
#self._offset_ref_angle = 0
def _set_ref_angle(self, a):
self._ref_angle = a
def _get_ref_angle(self):
return self._ref_angle
def _get_text_ref_angle(self):
if self._text_follow_ref_angle:
return self._get_ref_angle()+90
else:
return 0 #self.get_ref_angle()
def _get_offset_ref_angle(self):
return self._get_ref_angle()
def _set_offset_radius(self, offset_radius):
self._offset_radius = offset_radius
def _get_offset_radius(self):
return self._offset_radius
_get_opposite_direction = {"left":"right",
"right":"left",
"top":"bottom",
"bottom":"top"}.__getitem__
def _update(self, renderer):
pass
def draw(self, renderer):
if not self.get_visible(): return
self._update(renderer)
# save original and adjust some properties
tr = self.get_transform()
angle_orig = self.get_rotation()
offset_tr = Affine2D()
self.set_transform(tr+offset_tr)
text_ref_angle = self._get_text_ref_angle()
offset_ref_angle = self._get_offset_ref_angle()
theta = (offset_ref_angle)/180.*np.pi
dd = self._get_offset_radius()
dx, dy = dd * np.cos(theta), dd * np.sin(theta)
offset_tr.translate(dx, dy)
self.set_rotation(text_ref_angle+angle_orig)
super(LabelBase, self).draw(renderer)
offset_tr.clear()
# restore original properties
self.set_transform(tr)
self.set_rotation(angle_orig)
def get_window_extent(self, renderer):
self._update(renderer)
# save original and adjust some properties
tr = self.get_transform()
angle_orig = self.get_rotation()
offset_tr = Affine2D()
self.set_transform(tr+offset_tr)
text_ref_angle = self._get_text_ref_angle()
offset_ref_angle = self._get_offset_ref_angle()
theta = (offset_ref_angle)/180.*np.pi
dd = self._get_offset_radius()
dx, dy = dd * np.cos(theta), dd * np.sin(theta)
offset_tr.translate(dx, dy)
self.set_rotation(text_ref_angle+angle_orig)
bbox = super(LabelBase, self).get_window_extent(renderer).frozen()
offset_tr.clear()
# restore original properties
self.set_transform(tr)
self.set_rotation(angle_orig)
return bbox
def test_labelbase():
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
ax.plot([0.5], [0.5], "o")
label = LabelBase(0.5, 0.5, "Test")
a = -90
label._set_ref_angle(a)
label._set_offset_radius(offset_radius=50)
label.set_rotation(-90)
label.set(ha="center", va="top")
ax.add_artist(label)
plt.draw()
class AxisLabel(LabelBase, AttributeCopier):
"""
Axis Label. Derived from Text. The position of the text is updated
    on the fly, so changing the text position has no effect. Otherwise, the
properties can be changed as a normal Text.
To change the pad between ticklabels and axis label, use set_pad.
"""
def __init__(self, *kl, **kwargs):
axis_direction = kwargs.pop("axis_direction", "bottom")
self._axis = kwargs.pop("axis", None)
#super(AxisLabel, self).__init__(*kl, **kwargs)
LabelBase.__init__(self, *kl, **kwargs)
AttributeCopier.__init__(self, self._axis, klass=LabelBase)
self.set_axis_direction(axis_direction)
self._pad = 5
self._extra_pad = 0
def set_pad(self, pad):
"""
Set the pad in points. Note that the actual pad will be the
        sum of the internal pad and the external pad (which is set
        automatically by the AxisArtist); this method only sets the
        internal pad.
"""
self._pad = pad
def get_pad(self):
"""
        Return the pad in points. See set_pad for more details.
"""
return self._pad
def _set_external_pad(self, p):
"""
Set external pad IN PIXELS. This is intended to be set by the
        AxisArtist, not by the user.
"""
self._extra_pad = p
def _get_external_pad(self):
"""
Get external pad.
"""
return self._extra_pad
def get_ref_artist(self):
return self._axis.get_label()
def get_text(self):
t = super(AxisLabel, self).get_text()
if t == "__from_axes__":
return self._axis.get_label().get_text()
return self._text
_default_alignments = dict(left=("bottom", "center"),
right=("top", "center"),
bottom=("top", "center"),
top=("bottom", "center"))
def set_default_alignment(self, d):
if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
va, ha = self._default_alignments[d]
self.set_va(va)
self.set_ha(ha)
_default_angles = dict(left=180,
right=0,
bottom=0,
top=180)
def set_default_angle(self, d):
if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self.set_rotation(self._default_angles[d])
def set_axis_direction(self, d):
"""
Adjust the text angle and text alignment of axis label
according to the matplotlib convention.
===================== ========== ========= ========== ==========
property left bottom right top
===================== ========== ========= ========== ==========
axislabel angle 180 0 0 180
axislabel va center top center bottom
axislabel ha right center right center
===================== ========== ========= ========== ==========
Note that the text angles are actually relative to (90 + angle
of the direction to the ticklabel), which gives 0 for bottom
axis.
"""
if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self.set_default_alignment(d)
self.set_default_angle(d)
def get_color(self):
return self.get_attribute_from_ref_artist("color", "k")
def draw(self, renderer):
if not self.get_visible():
return
pad = renderer.points_to_pixels(self.get_pad())
r = self._get_external_pad() + pad
self._set_offset_radius(r)
super(AxisLabel, self).draw(renderer)
def get_window_extent(self, renderer):
if not self.get_visible():
return
pad = renderer.points_to_pixels(self.get_pad())
r = self._get_external_pad() + pad
self._set_offset_radius(r)
bb = super(AxisLabel, self).get_window_extent(renderer)
return bb
class TickLabels(AxisLabel, AttributeCopier): # mtext.Text
"""
Tick Labels. While derived from Text, this single artist draws all
ticklabels. As in AxisLabel, the position of the text is updated
    on the fly, so changing the text position has no effect. Otherwise,
    the properties can be changed as a normal Text. Unlike the
    ticklabels of mainline matplotlib, the properties of a single
    ticklabel alone cannot be modified.
To change the pad between ticks and ticklabels, use set_pad.
"""
def __init__(self, **kwargs):
axis_direction = kwargs.pop("axis_direction", "bottom")
AxisLabel.__init__(self, **kwargs)
self.set_axis_direction(axis_direction)
#self._axis_direction = axis_direction
self._axislabel_pad = 0
#self._extra_pad = 0
# attribute copier
def get_ref_artist(self):
return self._axis.get_ticklabels()[0]
def set_axis_direction(self, label_direction):
"""
Adjust the text angle and text alignment of ticklabels
according to the matplotlib convention.
The *label_direction* must be one of [left, right, bottom,
top].
===================== ========== ========= ========== ==========
property left bottom right top
===================== ========== ========= ========== ==========
ticklabels angle 90 0 -90 180
ticklabel va center baseline center baseline
ticklabel ha right center right center
===================== ========== ========= ========== ==========
Note that the text angles are actually relative to (90 + angle
of the direction to the ticklabel), which gives 0 for bottom
axis.
"""
if label_direction not in ["left", "right", "top", "bottom"]:
raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self._axis_direction = label_direction
self.set_default_alignment(label_direction)
self.set_default_angle(label_direction)
def invert_axis_direction(self):
label_direction = self._get_opposite_direction(self._axis_direction)
self.set_axis_direction(label_direction)
def _get_ticklabels_offsets(self, renderer, label_direction):
"""
Calculates the offsets of the ticklabels from the tick and
        their total heights. The offset only takes into account the offset
        due to the vertical alignment of the ticklabels, i.e., if the axis
        direction is bottom and va is 'top', it will return 0. If va
        is 'baseline', it will return (height-descent).
"""
whd_list = self.get_texts_widths_heights_descents(renderer)
if not whd_list:
return 0, 0
r = 0
va, ha = self.get_va(), self.get_ha()
if label_direction == "left":
pad = max([w for (w, h, d) in whd_list])
if ha == "left":
r = pad
elif ha == "center":
r = .5 * pad
elif label_direction == "right":
pad = max([w for (w, h, d) in whd_list])
if ha == "right":
r = pad
elif ha == "center":
r = .5 * pad
elif label_direction == "bottom":
pad = max([h for (w, h, d) in whd_list])
if va == "bottom":
r = pad
elif va == "center":
r =.5 * pad
elif va == "baseline":
max_ascent = max([(h-d) for (w, h, d) in whd_list])
max_descent = max([d for (w, h, d) in whd_list])
r = max_ascent
pad = max_ascent + max_descent
elif label_direction == "top":
pad = max([h for (w, h, d) in whd_list])
if va == "top":
r = pad
elif va == "center":
r =.5 * pad
elif va == "baseline":
max_ascent = max([(h-d) for (w, h, d) in whd_list])
max_descent = max([d for (w, h, d) in whd_list])
r = max_descent
pad = max_ascent + max_descent
#tick_pad = renderer.points_to_pixels(self.get_pad())
# r : offset
# pad : total height of the ticklabels. This will be used to
# calculate the pad for the axislabel.
return r, pad
_default_alignments = dict(left=("center", "right"),
right=("center", "left"),
bottom=("baseline", "center"),
top=("baseline", "center"))
# set_default_alignments(self, d)
_default_angles = dict(left=90,
right=-90,
bottom=0,
top=180)
def draw(self, renderer):
if not self.get_visible():
self._axislabel_pad = self._get_external_pad()
return
r, total_width = self._get_ticklabels_offsets(renderer,
self._axis_direction)
#self._set_external_pad(r+self._get_external_pad())
pad = self._get_external_pad() + \
renderer.points_to_pixels(self.get_pad())
self._set_offset_radius(r+pad)
#self._set_offset_radius(r)
for (x, y), a, l in self._locs_angles_labels:
if not l.strip(): continue
self._set_ref_angle(a) #+ add_angle
self.set_x(x)
self.set_y(y)
self.set_text(l)
LabelBase.draw(self, renderer)
self._axislabel_pad = total_width \
+ pad # the value saved will be used to draw axislabel.
def set_locs_angles_labels(self, locs_angles_labels):
self._locs_angles_labels = locs_angles_labels
def get_window_extents(self, renderer):
if not self.get_visible():
self._axislabel_pad = self._get_external_pad()
return []
bboxes = []
r, total_width = self._get_ticklabels_offsets(renderer,
self._axis_direction)
pad = self._get_external_pad() + \
renderer.points_to_pixels(self.get_pad())
self._set_offset_radius(r+pad)
for (x, y), a, l in self._locs_angles_labels:
self._set_ref_angle(a) #+ add_angle
self.set_x(x)
self.set_y(y)
self.set_text(l)
bb = LabelBase.get_window_extent(self, renderer)
bboxes.append(bb)
self._axislabel_pad = total_width \
+ pad # the value saved will be used to draw axislabel.
return bboxes
def get_texts_widths_heights_descents(self, renderer):
"""
        Return a list of (width, height, descent) tuples for the ticklabels.
"""
whd_list = []
for (x, y), a, l in self._locs_angles_labels:
if not l.strip(): continue
clean_line, ismath = self.is_math_text(l)
whd = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
whd_list.append(whd)
return whd_list
def test_ticklabels():
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.plot([0.2, 0.4], [0.5, 0.5], "o")
ticks = Ticks(ticksize=10, axis=ax.xaxis)
ax.add_artist(ticks)
locs_angles_labels = [((0.2, 0.5), -90, "0.2"),
((0.4, 0.5), -120, "0.4")]
tick_locs_angles = [(xy, a+180) for xy, a, l in locs_angles_labels]
ticks.set_locs_angles(tick_locs_angles)
ax.plot([0.5], [0.5], ",")
axislabel = AxisLabel(0.5, 0.5, "Test")
axislabel._set_offset_radius(20)
axislabel._set_ref_angle(0)
axislabel.set_axis_direction("bottom")
#axislabel._text_follow_ref_angle = True
#axislabel.set(va="center", ha="right")
ax.add_artist(axislabel)
if 1:
ticklabels = TickLabels(axis_direction="left")
ticklabels._locs_angles_labels = locs_angles_labels
#ticklabels.set_rotation(90)
ticklabels.set_pad(10)
ax.add_artist(ticklabels)
ax.set_xlim(0, 1); ax.set_ylim(0, 1)
plt.draw()
class GridlinesCollection(LineCollection):
def __init__(self, *kl, **kwargs):
"""
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
self._which = kwargs.pop("which", "major")
self._axis = kwargs.pop("axis", "both")
super(GridlinesCollection, self).__init__(*kl, **kwargs)
self.set_grid_helper(None)
def set_which(self, which):
self._which = which
def set_axis(self, axis):
self._axis = axis
def set_grid_helper(self, grid_helper):
self._grid_helper = grid_helper
def draw(self, renderer):
if self._grid_helper is not None:
self._grid_helper.update_lim(self.axes)
gl = self._grid_helper.get_gridlines(self._which, self._axis)
if gl:
self.set_segments([np.transpose(l) for l in gl])
else:
self.set_segments([])
super(GridlinesCollection, self).draw(renderer)
class AxisArtist(martist.Artist):
"""
    An artist which draws the axis line (a line along which the n-th axes
    coordinate is constant), ticks, ticklabels, and the axis label.
"""
ZORDER=2.5
# LABELPAD : as property
def _set_labelpad(self, v):
return self.label.set_pad(v)
def _get_labelpad(self):
return self.label.get_pad()
LABELPAD = property(_get_labelpad, _set_labelpad)
def __init__(self, axes,
helper,
offset=None,
axis_direction="bottom",
**kw):
"""
*axes* : axes
*helper* : an AxisArtistHelper instance.
"""
#axes is also used to follow the axis attribute (tick color, etc).
super(AxisArtist, self).__init__(**kw)
self.axes = axes
self._axis_artist_helper = helper
if offset is None:
offset = (0, 0)
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(offset[0], offset[1],
self.dpi_transform)
self._label_visible = True
self._majortick_visible = True
self._majorticklabel_visible = True
self._minortick_visible = True
self._minorticklabel_visible = True
#if self._axis_artist_helper._loc in ["left", "right"]:
if axis_direction in ["left", "right"]:
axis_name = "ytick"
self.axis = axes.yaxis
else:
axis_name = "xtick"
self.axis = axes.xaxis
self._axisline_style = None
self._axis_direction = axis_direction
self._init_line()
self._init_ticks(axis_name, **kw)
self._init_offsetText(axis_direction)
self._init_label()
self.set_zorder(self.ZORDER)
self._rotate_label_along_line = False
# axis direction
self._tick_add_angle = 180.
self._ticklabel_add_angle = 0.
self._axislabel_add_angle = 0.
self.set_axis_direction(axis_direction)
# axis direction
def set_axis_direction(self, axis_direction):
"""
        Adjust the direction, text angle, and text alignment of the
        ticklabels and labels, following the matplotlib convention for
the rectangle axes.
The *axis_direction* must be one of [left, right, bottom,
top].
===================== ========== ========= ========== ==========
property left bottom right top
===================== ========== ========= ========== ==========
ticklabels location "-" "+" "+" "-"
axislabel location "-" "+" "+" "-"
ticklabels angle 90 0 -90 180
ticklabel va center baseline center baseline
ticklabel ha right center right center
axislabel angle 180 0 0 180
axislabel va center top center bottom
axislabel ha right center right center
===================== ========== ========= ========== ==========
Note that the direction "+" and "-" are relative to the direction of
the increasing coordinate. Also, the text angles are actually
relative to (90 + angle of the direction to the ticklabel),
which gives 0 for bottom axis.
"""
if axis_direction not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self._axis_direction = axis_direction
if axis_direction in ["left", "top"]:
#self._set_tick_direction("+")
self.set_ticklabel_direction("-")
self.set_axislabel_direction("-")
else:
#self._set_tick_direction("-")
self.set_ticklabel_direction("+")
self.set_axislabel_direction("+")
self.major_ticklabels.set_axis_direction(axis_direction)
self.label.set_axis_direction(axis_direction)
# def _set_tick_direction(self, d):
# if d not in ["+", "-"]:
# raise ValueError('direction must be on of "in", "out"')
# if d == "+":
# self._tick_add_angle = 0 #get_helper()._extremes=0, 10
# else:
# self._tick_add_angle = 180 #get_helper()._extremes=0, 10
def set_ticklabel_direction(self, tick_direction):
"""
Adjust the direction of the ticklabel.
ACCEPTS: [ "+" | "-" ]
Note that the label_direction '+' and '-' are relative to the
direction of the increasing coordinate.
"""
if tick_direction not in ["+", "-"]:
raise ValueError('direction must be one of "+", "-"')
if tick_direction == "-":
self._ticklabel_add_angle = 180
else:
self._ticklabel_add_angle = 0
def invert_ticklabel_direction(self):
self._ticklabel_add_angle = (self._ticklabel_add_angle + 180) % 360
self.major_ticklabels.invert_axis_direction()
self.minor_ticklabels.invert_axis_direction()
# def invert_ticks_direction(self):
# self.major_ticks.set_tick_out(not self.major_ticks.get_tick_out())
# self.minor_ticks.set_tick_out(not self.minor_ticks.get_tick_out())
def set_axislabel_direction(self, label_direction):
"""
Adjust the direction of the axislabel.
ACCEPTS: [ "+" | "-" ]
Note that the label_direction '+' and '-' are relative to the
direction of the increasing coordinate.
"""
if label_direction not in ["+", "-"]:
raise ValueError('direction must be one of "+", "-"')
if label_direction == "-":
self._axislabel_add_angle = 180
else:
self._axislabel_add_angle = 0
def get_transform(self):
return self.axes.transAxes + self.offset_transform
def get_helper(self):
"""
Return axis artist helper instance.
"""
return self._axis_artist_helper
def set_axisline_style(self, axisline_style=None, **kw):
"""
Set the axisline style.
*axisline_style* can be a string with axisline style name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("->,size=1.5")
set_arrowstyle("->", size=1.5)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
available styles as a list of strings.
"""
        if axisline_style is None:
return AxislineStyle.pprint_styles()
if isinstance(axisline_style, AxislineStyle._Base):
self._axisline_style = axisline_style
else:
self._axisline_style = AxislineStyle(axisline_style, **kw)
self._init_line()
def get_axisline_style(self):
"""
        Return the current axisline style.
"""
return self._axisline_style
def _init_line(self):
"""
        Initialize the *line* artist that is responsible for drawing the axis line.
"""
tran = self._axis_artist_helper.get_line_transform(self.axes) \
+ self.offset_transform
axisline_style = self.get_axisline_style()
if axisline_style is None:
self.line = BezierPath(self._axis_artist_helper.get_line(self.axes),
color=rcParams['axes.edgecolor'],
linewidth=rcParams['axes.linewidth'],
transform=tran)
else:
self.line = axisline_style(self, transform=tran)
def _draw_line(self, renderer):
self.line.set_path(self._axis_artist_helper.get_line(self.axes))
if self.get_axisline_style() is not None:
self.line.set_line_mutation_scale(self.major_ticklabels.get_size())
self.line.draw(renderer)
def _init_ticks(self, axis_name, **kw):
trans=self._axis_artist_helper.get_tick_transform(self.axes) \
+ self.offset_transform
major_tick_size = kw.get("major_tick_size",
rcParams['%s.major.size'%axis_name])
major_tick_pad = kw.get("major_tick_pad",
rcParams['%s.major.pad'%axis_name])
minor_tick_size = kw.get("minor_tick_size",
rcParams['%s.minor.size'%axis_name])
minor_tick_pad = kw.get("minor_tick_pad",
rcParams['%s.minor.pad'%axis_name])
self.major_ticks = Ticks(major_tick_size,
axis=self.axis,
transform=trans)
self.minor_ticks = Ticks(minor_tick_size,
axis=self.axis,
transform=trans)
if axis_name == "xaxis":
size = rcParams['xtick.labelsize']
else:
size = rcParams['ytick.labelsize']
fontprops = font_manager.FontProperties(size=size)
self.major_ticklabels = TickLabels(size=size, axis=self.axis,
axis_direction=self._axis_direction)
self.minor_ticklabels = TickLabels(size=size, axis=self.axis,
axis_direction=self._axis_direction)
self.major_ticklabels.set(figure = self.axes.figure,
transform=trans,
fontproperties=fontprops)
self.major_ticklabels.set_pad(major_tick_pad)
self.minor_ticklabels.set(figure = self.axes.figure,
transform=trans,
fontproperties=fontprops)
self.minor_ticklabels.set_pad(minor_tick_pad)
def _get_tick_info(self, tick_iter):
"""
return ticks_loc_angle, ticklabels_loc_angle_label
ticks_loc_angle : list of locs and angles for ticks
        ticklabels_loc_angle_label : list of locs, angles and labels for ticklabels
"""
ticks_loc_angle = []
ticklabels_loc_angle_label = []
tick_add_angle = self._tick_add_angle
ticklabel_add_angle = self._ticklabel_add_angle
for loc, angle_normal, angle_tangent, label in tick_iter:
angle_label = angle_tangent - 90
angle_label += ticklabel_add_angle
if np.cos((angle_label - angle_normal)/180.*np.pi) < 0.:
angle_tick = angle_normal
else:
angle_tick = angle_normal + 180
ticks_loc_angle.append([loc, angle_tick])
ticklabels_loc_angle_label.append([loc, angle_label, label])
return ticks_loc_angle, ticklabels_loc_angle_label
def _update_ticks(self, renderer):
# set extra pad for major and minor ticklabels:
# use ticksize of majorticks even for minor ticks. not clear what is best.
dpi_cor = renderer.points_to_pixels(1.)
if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
else:
self.major_ticklabels._set_external_pad(0)
self.minor_ticklabels._set_external_pad(0)
majortick_iter, minortick_iter = \
self._axis_artist_helper.get_tick_iterators(self.axes)
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(majortick_iter)
self.major_ticks.set_locs_angles(tick_loc_angle)
self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
#self.major_ticks.draw(renderer)
#self.major_ticklabels.draw(renderer)
# minor ticks
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(minortick_iter)
self.minor_ticks.set_locs_angles(tick_loc_angle)
self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
#self.minor_ticks.draw(renderer)
#self.minor_ticklabels.draw(renderer)
#if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
# self._draw_offsetText(renderer)
return self.major_ticklabels.get_window_extents(renderer)
def _draw_ticks(self, renderer):
extents = self._update_ticks(renderer)
self.major_ticks.draw(renderer)
self.major_ticklabels.draw(renderer)
self.minor_ticks.draw(renderer)
self.minor_ticklabels.draw(renderer)
if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
self._draw_offsetText(renderer)
return extents
def _draw_ticks2(self, renderer):
# set extra pad for major and minor ticklabels:
# use ticksize of majorticks even for minor ticks. not clear what is best.
dpi_cor = renderer.points_to_pixels(1.)
if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
else:
self.major_ticklabels._set_external_pad(0)
self.minor_ticklabels._set_external_pad(0)
majortick_iter, minortick_iter = \
self._axis_artist_helper.get_tick_iterators(self.axes)
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(majortick_iter)
self.major_ticks.set_locs_angles(tick_loc_angle)
self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
self.major_ticks.draw(renderer)
self.major_ticklabels.draw(renderer)
# minor ticks
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(minortick_iter)
self.minor_ticks.set_locs_angles(tick_loc_angle)
self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
self.minor_ticks.draw(renderer)
self.minor_ticklabels.draw(renderer)
if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
self._draw_offsetText(renderer)
return self.major_ticklabels.get_window_extents(renderer)
_offsetText_pos = dict(left=(0, 1, "bottom", "right"),
right=(1, 1, "bottom", "left"),
bottom=(1, 0, "top", "right"),
top=(1, 1, "bottom", "right"))
def _init_offsetText(self, direction):
x,y,va,ha = self._offsetText_pos[direction]
self.offsetText = mtext.Annotation("",
xy=(x,y), xycoords="axes fraction",
xytext=(0,0), textcoords="offset points",
#fontproperties = fp,
color = rcParams['xtick.color'],
verticalalignment=va,
horizontalalignment=ha,
)
self.offsetText.set_transform(IdentityTransform())
self.axes._set_artist_props(self.offsetText)
def _update_offsetText(self):
self.offsetText.set_text( self.axis.major.formatter.get_offset() )
self.offsetText.set_size(self.major_ticklabels.get_size())
offset = self.major_ticklabels.get_pad() + self.major_ticklabels.get_size() + 2.
self.offsetText.xyann= (0, offset)
def _draw_offsetText(self, renderer):
self._update_offsetText()
self.offsetText.draw(renderer)
def _init_label(self, **kw):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
labelsize = kw.get("labelsize",
rcParams['axes.labelsize'])
#labelcolor = kw.get("labelcolor",
# rcParams['axes.labelcolor'])
fontprops = font_manager.FontProperties(
size=labelsize,
weight=rcParams['axes.labelweight'])
textprops = dict(fontproperties = fontprops)
#color = labelcolor)
tr = self._axis_artist_helper.get_axislabel_transform(self.axes) \
+ self.offset_transform
self.label = AxisLabel(0, 0, "__from_axes__",
color = "auto", #rcParams['axes.labelcolor'],
fontproperties=fontprops,
axis=self.axis,
transform=tr,
axis_direction=self._axis_direction,
)
self.label.set_figure(self.axes.figure)
labelpad = kw.get("labelpad", 5)
self.label.set_pad(labelpad)
def _update_label(self, renderer):
if not self.label.get_visible():
return
fontprops = font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight'])
#pad_points = self.major_tick_pad
#print self._ticklabel_add_angle - self._axislabel_add_angle
#if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
if self._ticklabel_add_angle != self._axislabel_add_angle:
if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
or \
(self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
axislabel_pad = self.major_ticks._ticksize
else:
axislabel_pad = 0
else:
axislabel_pad = max([self.major_ticklabels._axislabel_pad,
self.minor_ticklabels._axislabel_pad])
#label_offset = axislabel_pad + self.LABELPAD
#self.label._set_offset_radius(label_offset)
self.label._set_external_pad(axislabel_pad)
xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
if xy is None: return
angle_label = angle_tangent - 90
x, y = xy
self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
self.label.set(x=x, y=y)
def _draw_label(self, renderer):
self._update_label(renderer)
self.label.draw(renderer)
def _draw_label2(self, renderer):
if not self.label.get_visible():
return
fontprops = font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight'])
#pad_points = self.major_tick_pad
#print self._ticklabel_add_angle - self._axislabel_add_angle
#if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
if self._ticklabel_add_angle != self._axislabel_add_angle:
if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
or \
(self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
axislabel_pad = self.major_ticks._ticksize
else:
axislabel_pad = 0
else:
axislabel_pad = max([self.major_ticklabels._axislabel_pad,
self.minor_ticklabels._axislabel_pad])
#label_offset = axislabel_pad + self.LABELPAD
#self.label._set_offset_radius(label_offset)
self.label._set_external_pad(axislabel_pad)
xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
if xy is None: return
angle_label = angle_tangent - 90
x, y = xy
self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
self.label.set(x=x, y=y)
self.label.draw(renderer)
def set_label(self, s):
self.label.set_text(s)
def get_tightbbox(self, renderer):
if not self.get_visible(): return
self._axis_artist_helper.update_lim(self.axes)
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
bb = []
self._update_ticks(renderer)
#if self.major_ticklabels.get_visible():
bb.extend(self.major_ticklabels.get_window_extents(renderer))
#if self.minor_ticklabels.get_visible():
bb.extend(self.minor_ticklabels.get_window_extents(renderer))
self._update_label(renderer)
#if self.label.get_visible():
bb.append(self.label.get_window_extent(renderer))
bb.append(self.offsetText.get_window_extent(renderer))
bb = [b for b in bb if b and (b.width!=0 or b.height!=0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return None
#self._draw_line(renderer)
#self._draw_ticks(renderer)
#self._draw_offsetText(renderer)
#self._draw_label(renderer)
@allow_rasterization
def draw(self, renderer):
'Draw the axis lines, tick lines and labels'
if not self.get_visible(): return
renderer.open_group(__name__)
self._axis_artist_helper.update_lim(self.axes)
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
self._draw_ticks(renderer)
self._draw_line(renderer)
#self._draw_offsetText(renderer)
self._draw_label(renderer)
renderer.close_group(__name__)
#def get_ticklabel_extents(self, renderer):
# pass
def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
"""
Toggle visibility of ticks, ticklabels, and (axis) label.
To turn all off, ::
axis.toggle(all=False)
To turn all off but ticks on ::
axis.toggle(all=False, ticks=True)
To turn all on but (axis) label off ::
          axis.toggle(all=True, label=False)
"""
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not None:
_ticks = ticks
if ticklabels is not None:
_ticklabels = ticklabels
if label is not None:
_label = label
if _ticks is not None:
self.major_ticks.set_visible(_ticks)
self.minor_ticks.set_visible(_ticks)
if _ticklabels is not None:
self.major_ticklabels.set_visible(_ticklabels)
self.minor_ticklabels.set_visible(_ticklabels)
if _label is not None:
self.label.set_visible(_label)
def test_axis_artist():
global axisline
#self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes)
from mpl_toolkits.axisartist import AxisArtistHelperRectlinear
fig = plt.figure(1)
fig.clf()
ax=fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if 1:
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="left")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="left")
ax.add_artist(axisline)
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="right")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="right")
ax.add_artist(axisline)
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="bottom")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="bottom")
axisline.set_label("TTT")
#axisline.label.set_visible(False)
ax.add_artist(axisline)
#axisline.major_ticklabels.set_axis_direction("bottom")
axisline.major_ticks.set_tick_out(False)
ax.set_ylabel("Test")
axisline.label.set_pad(5)
plt.draw()
def test_axis_artist2():
global axisline
#self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes)
from mpl_toolkits.axislines import AxisArtistHelperRectlinear
fig = plt.figure(1)
fig.clf()
ax=fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="bottom")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="bottom")
axisline.set_label("TTT")
ax.add_artist(axisline)
#axisline.major_ticklabels.set_axis_direction("bottom")
axisline.major_ticks.set_tick_out(False)
ax.set_ylabel("Test")
plt.draw()
if __name__ == "__main__":
#test_labelbase()
#test_ticklabels()
test_axis_artist()
#test_axis_artist2()
# DONE
# *. ticks, ticklabels, axislabels
# *. workon axisartist
# TODO
|
mit
|
jgerschler/ESL-Games
|
Kinect/Adjective Adverb/Adverb or Adjective (Deprecated)/AdverbOrAdjectiveFullScreen.py
|
1
|
12778
|
# for python 3
# You'll need to customize this according to your needs. Proper orientation of
# the kinect is vital; if participants are able to maintain their head or wrists
# continuously inside the word rects, they will repeatedly trigger the collision
# detection.
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import pygame
import random
import os
import sys
TRACKING_COLOR = pygame.color.Color("green")
HIGHLIGHT_COLOR = pygame.color.Color("red")
BG_COLOR = pygame.color.Color("white")
GAME_TIME = 60  # seconds
class BodyGameRuntime(object):
def __init__(self):
pygame.init()
pygame.mixer.init()
self.beep_sound = pygame.mixer.Sound('audio\\beep.ogg')
self.buzz_sound = pygame.mixer.Sound('audio\\buzz.ogg')
self._screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN, 32)
pygame.display.set_caption("Kinect Game Framework Test")
self.finished = False
self._clock = pygame.time.Clock()
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
PyKinectV2.FrameSourceTypes_Body)
self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width,
self._kinect.color_frame_desc.Height), 0, 32)
self._bodies = None
self.score = 0
self.vocab_dict = {"People drive ____ these days.":["quickly", "quick"],
"She has an ____ dog.":["active", "actively"],
"He ____ opens the mail.":["carefully", "careful"],
"The man ____ greets his friends.":["cheerfully", "cheerful"],
"That is a ____ sofa!":["comfortable", "comfortably"],
"The alarm sounds ____.":["continuously", "continuous"],
"That woman is ____!":["crazy", "crazily"],
"The woman speaks ____.":["delightfully", "delightful"],
"Juan is a very ____ carpenter.":["creative", "creatively"],
"Wow! That is a ____ storm!":["destructive", "destructively"],
"The racecar drove ____ by the school.":["powerfully", "powerful"],
"Juana ____ said NO!":["firmly", "firm"],
"He ____ opened the door.":["forcefully", "forceful"],
"It was a ____ day.":["glorious", "gloriously"],
"Maria ____ observed her ex-boyfriend.":["hatefully", "hateful"],
"He had a ___ idea.":["hopeful", "hopefully"],
"It was an ____ phrase.":["insulting", "insultingly"],
"Jenny ____ ate the last cookie.":["intentionally", "intentional"],
"He likes ____ music.":["irritating", "irritatingly"],
"Careful! That is a ___ dog!":["bad", "badly"],
"The man reacted ___ to the good news.":["speedily", "speedy"],
"Susana has always been a ____ girl.":["nice", "nicely"],
"The boys plunged into the ____ water.":["deep", "deeply"],
"The girl ____ saved her cat from the fire.":["bravely", "brave"],
"The man ____ drank too much alcohol.":["foolishly", "foolish"],
"Mario is ____ and never does his homework.":["lazy", "lazily"],
"The teacher is very ____.":["rude", "rudely"],
"The girl plays soccer ____.":["perfectly", "perfect"],
"It was an ____ crash.":["accidental", "accidentally"],
"That is an ____ turtle!.":["angry", "angrily"],
"She ____ ate her beans.":["happily", "happy"],
"John spoke ____.":["seriously", "serious"],
"Firulais is a ____ dog.":["loyal", "loyally"],
"Margie yelled ____ into the night.":["blindly", "blind"],
"He ran ____ toward me.":["wildly", "wild"],
"Pedro is ____!":["innocent", "innocently"],
"The gross man winked at her ____.":["sexually", "sexual"],
"Concepcion is a ____ girlfriend.":["jealous", "jealously"],
"Luis ____ goes to the bar.":["frequently", "frequent"],
"We didn't go out because it was raining ____.":["heavily", "heavy"],
"Our team lost the game because we played ____.":["badly", "bad"],
"We waited ____.":["patiently", "patient"],
"Jimmy arrived ____.":["unexpectedly", "unexpected"],
"Mike stays fit by playing tennis ____.":["regularly", "regular"],
"The driver of the car was ____ injured.":["seriously", "serious"],
"The driver of the car had ____ injuries.":["serious", "seriously"],
"Ismael looked ____ at Eleazar.":["hungrily", "hungry"],
"She is a ____ driver.":["dangerous", "dangerously"]}
self._frame_surface.fill((255, 255, 255))
def text_objects(self, text, font):
text_surface = font.render(text, True, (0, 0, 0))
return text_surface, text_surface.get_rect()
def message_display(self, text, loc_tuple, loc_int):
# loc_int: 1 center, 2 top left, 3 bottom left, 4 bottom right, 5 top right
text_surf, text_rect = self.text_objects(text, pygame.font.Font(None, 64))
loc_dict = {1:'text_rect.center', 2:'text_rect.topleft', 3:'text_rect.bottomleft',
4:'text_rect.bottomright', 5:'text_rect.topright'}
exec(loc_dict[loc_int] + ' = loc_tuple')
self._frame_surface.blit(text_surf, text_rect)
return text_rect
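# Illustrative usage: self.message_display("Score: 3", (100, 50), 2) draws the
# text with its top-left corner at (100, 50) and returns the blitted rect.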
def draw_ind_point(self, joints, jointPoints, color, highlight_color, rect0, rect1, joint0, words, sentence, correct_word):
joint0State = joints[joint0].TrackingState
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
if (rect0.collidepoint(center) and words[0] == correct_word) or (rect1.collidepoint(center) and words[1] == correct_word):
self.score += 1
self.beep_sound.play()
pygame.time.delay(500)
self.new_round()
elif rect0.collidepoint(center) or rect1.collidepoint(center):
try:
pygame.draw.circle(self._frame_surface, highlight_color, center, 20, 0)
self.score -= 1
self.buzz_sound.play()
pygame.time.delay(500)
self.new_round()
except:
pass
else:
try:
pygame.draw.circle(self._frame_surface, color, center, 20, 0)
except:
pass
def draw_ind_intro_point(self, joints, jointPoints, color, joint0):
joint0State = joints[joint0].TrackingState
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
try:
pygame.draw.circle(self._frame_surface, color, center, 20, 0)
except:
pass
def update_intro_screen(self, joints, jointPoints, color):
self._frame_surface.fill(BG_COLOR)# blank screen before drawing points
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_Head)
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristLeft)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristRight)
def update_screen(self, joints, jointPoints, color, highlight_color, words, sentence, correct_word, seconds):
self._frame_surface.fill(BG_COLOR)
self.message_display(sentence, (300, 900), 2)
rect0 = self.message_display(words[0], (400, 300), 1)
rect1 = self.message_display(words[1], (self._frame_surface.get_width() - 400, 300), 1)
self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
self.message_display(str(seconds), (self._frame_surface.get_width() - 300, 800), 1)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, PyKinectV2.JointType_Head, words, sentence, correct_word)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, PyKinectV2.JointType_WristRight, words, sentence, correct_word)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, PyKinectV2.JointType_WristLeft, words, sentence, correct_word)
def end_game(self):
self._frame_surface.fill(BG_COLOR)
self.message_display("Score: {}".format(self.score), (self._frame_surface.get_width() / 2, self._frame_surface.get_height() / 2), 1)
self._screen.blit(self._frame_surface, (0, 0))
pygame.display.update()
pygame.time.delay(3000)
self.run()
def new_round(self):
sentence = random.sample(list(self.vocab_dict), 1)[0]
words = self.vocab_dict[sentence][:]
correct_word = words[0]
random.shuffle(words)
pygame.time.delay(500)
while not self.finished:
seconds = int(GAME_TIME - (pygame.time.get_ticks() - self.start_ticks)/1000)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
if seconds <= 0:
self.end_game()
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_screen(joints, joint_points, TRACKING_COLOR, HIGHLIGHT_COLOR, words, sentence, correct_word, seconds)
self._screen.blit(self._frame_surface, (0,0))
pygame.display.update()
self._clock.tick(60)
self.end_game()
def run(self):
self.score = 0
while not self.finished:
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_intro_screen(joints, joint_points, TRACKING_COLOR)
self._screen.blit(self._frame_surface, (0,0))
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
self.start_ticks = pygame.time.get_ticks()
self.new_round()
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
self._clock.tick(60)
self._kinect.close()
pygame.quit()
#os._exit(0)
sys.exit()
if __name__ == "__main__":
game = BodyGameRuntime()
game.run()
|
mit
|
wxgeo/geophar
|
wxgeometrie/sympy/codegen/tests/test_applications.py
|
7
|
2142
|
# This file contains tests that exercise multiple AST nodes
from sympy.external import import_module
from sympy.printing.ccode import ccode
from sympy.utilities._compilation import compile_link_import_strings, has_c
from sympy.utilities._compilation.util import TemporaryDirectory, may_xfail
from sympy.utilities.pytest import skip
from sympy.sets import Range
from sympy.codegen.ast import (
FunctionDefinition, FunctionPrototype, Variable, Pointer, real, Assignment,
integer, CodeBlock, While
)
from sympy.codegen.cnodes import void, PreIncrement
from sympy.codegen.cutils import render_as_source_file
cython = import_module('cython')
np = import_module('numpy')
def _mk_func1():
declars = n, inp, out = Variable('n', integer), Pointer('inp', real), Pointer('out', real)
i = Variable('i', integer)
whl = While(i<n, [Assignment(out[i], inp[i]), PreIncrement(i)])
body = CodeBlock(i.as_Declaration(value=0), whl)
return FunctionDefinition(void, 'our_test_function', declars, body)
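# The AST built above corresponds roughly to the following C (illustrative
# sketch; the exact text comes from the ccode printer):
# void our_test_function(int n, double *inp, double *out) {
#     int i = 0;
#     while (i < n) {
#         out[i] = inp[i];
#         ++i;
#     }
# }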
def _render_compile_import(funcdef, build_dir):
code_str = render_as_source_file(funcdef, settings=dict(contract=False))
declar = ccode(FunctionPrototype.from_FunctionDefinition(funcdef))
return compile_link_import_strings([
('our_test_func.c', code_str),
('_our_test_func.pyx', ("cdef extern {declar}\n"
"def _{fname}({typ}[:] inp, {typ}[:] out):\n"
" {fname}(inp.size, &inp[0], &out[0])").format(
declar=declar, fname=funcdef.name, typ='double'
))
], build_dir=build_dir)
@may_xfail
def test_copying_function():
if not np:
skip("numpy not installed.")
if not has_c():
skip("No C compiler found.")
if not cython:
skip("Cython not found.")
info = None
with TemporaryDirectory() as folder:
mod, info = _render_compile_import(_mk_func1(), build_dir=folder)
inp = np.arange(10.0)
out = np.empty_like(inp)
mod._our_test_function(inp, out)
assert np.allclose(inp, out)
|
gpl-2.0
|
mastizada/kuma
|
vendor/lib/python/south/migration/base.py
|
57
|
16235
|
from __future__ import print_function
from collections import deque
import datetime
from imp import reload
import os
import re
import sys
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.conf import settings
from django.utils import importlib
from south import exceptions
from south.migration.utils import depends, dfs, flatten, get_app_label
from south.orm import FakeORM
from south.utils import memoize, ask_for_it_by_name, datetime_utils
from south.migration.utils import app_label_to_app_module
from south.utils.py3 import string_types, with_metaclass
def all_migrations(applications=None):
"""
Returns all Migrations for all `applications` that are migrated.
"""
if applications is None:
applications = models.get_apps()
for model_module in applications:
# The app they've passed is the models module - go up one level
app_path = ".".join(model_module.__name__.split(".")[:-1])
app = ask_for_it_by_name(app_path)
try:
yield Migrations(app)
except exceptions.NoMigrations:
pass
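# Illustrative usage (not part of the original module):
# for migrations in all_migrations():
#     print("%s: %s" % (migrations.app_label(), [m.name() for m in migrations]))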
def application_to_app_label(application):
"Works out the app label from either the app label, the app name, or the module"
if isinstance(application, string_types):
app_label = application.split('.')[-1]
else:
app_label = application.__name__.split('.')[-1]
return app_label
class MigrationsMetaclass(type):
"""
Metaclass which ensures there is only one instance of a Migrations for
any given app.
"""
def __init__(self, name, bases, dict):
super(MigrationsMetaclass, self).__init__(name, bases, dict)
self.instances = {}
def __call__(self, application, **kwds):
app_label = application_to_app_label(application)
# If we don't already have an instance, make one
if app_label not in self.instances:
self.instances[app_label] = super(MigrationsMetaclass, self).__call__(app_label_to_app_module(app_label), **kwds)
return self.instances[app_label]
def _clear_cache(self):
"Clears the cache of Migration objects."
self.instances = {}
class Migrations(with_metaclass(MigrationsMetaclass, list)):
"""
Holds a list of Migration objects for a particular app.
"""
if getattr(settings, "SOUTH_USE_PYC", False):
MIGRATION_FILENAME = re.compile(r'(?!__init__)' # Don't match __init__.py
r'[0-9a-zA-Z_]*' # Don't match dotfiles, or names with dots/invalid chars in them
r'(\.pyc?)?$') # Match .py or .pyc files, or module dirs
else:
MIGRATION_FILENAME = re.compile(r'(?!__init__)' # Don't match __init__.py
r'[0-9a-zA-Z_]*' # Don't match dotfiles, or names with dots/invalid chars in them
r'(\.py)?$') # Match only .py files, or module dirs
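# Examples: "0001_initial.py" and a migration package directory match;
# "__init__.py" and dotfiles such as ".hidden.py" do not. With SOUTH_USE_PYC
# enabled, "0001_initial.pyc" is accepted as well.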
def __init__(self, application, force_creation=False, verbose_creation=True):
"Constructor. Takes the module of the app, NOT its models (like get_app returns)"
self._cache = {}
self.set_application(application, force_creation, verbose_creation)
def create_migrations_directory(self, verbose=True):
"Given an application, ensures that the migrations directory is ready."
migrations_dir = self.migrations_dir()
# Make the directory if it's not already there
if not os.path.isdir(migrations_dir):
if verbose:
print("Creating migrations directory at '%s'..." % migrations_dir)
os.mkdir(migrations_dir)
# Same for __init__.py
init_path = os.path.join(migrations_dir, "__init__.py")
if not os.path.isfile(init_path):
# Touch the init py file
if verbose:
print("Creating __init__.py in '%s'..." % migrations_dir)
open(init_path, "w").close()
def migrations_dir(self):
"""
Returns the full path of the migrations directory.
If it doesn't exist yet, returns where it would exist, based on the
app's migrations module (defaults to app.migrations)
"""
module_path = self.migrations_module()
try:
module = importlib.import_module(module_path)
except ImportError:
# There's no migrations module made yet; guess!
try:
parent = importlib.import_module(".".join(module_path.split(".")[:-1]))
except ImportError:
# The parent doesn't even exist, that's an issue.
raise exceptions.InvalidMigrationModule(
application = self.application.__name__,
module = module_path,
)
else:
# Good guess.
return os.path.join(os.path.dirname(parent.__file__), module_path.split(".")[-1])
else:
# Get directory directly
return os.path.dirname(module.__file__)
def migrations_module(self):
"Returns the module name of the migrations module for this"
app_label = application_to_app_label(self.application)
if hasattr(settings, "SOUTH_MIGRATION_MODULES"):
if app_label in settings.SOUTH_MIGRATION_MODULES:
# There's an override.
return settings.SOUTH_MIGRATION_MODULES[app_label]
return self._application.__name__ + '.migrations'
def get_application(self):
return self._application
def set_application(self, application, force_creation=False, verbose_creation=True):
"""
Called when the application for this Migrations is set.
Imports the migrations module object, and throws a paddy if it can't.
"""
self._application = application
if not hasattr(application, 'migrations'):
try:
module = importlib.import_module(self.migrations_module())
self._migrations = application.migrations = module
except ImportError:
if force_creation:
self.create_migrations_directory(verbose_creation)
module = importlib.import_module(self.migrations_module())
self._migrations = application.migrations = module
else:
raise exceptions.NoMigrations(application)
self._load_migrations_module(application.migrations)
application = property(get_application, set_application)
def _load_migrations_module(self, module):
self._migrations = module
filenames = []
dirname = self.migrations_dir()
for f in os.listdir(dirname):
if self.MIGRATION_FILENAME.match(os.path.basename(f)):
full_path = os.path.join(dirname, f)
# If it's a .pyc file, only append if the .py isn't already around
if f.endswith(".pyc") and (os.path.isfile(full_path[:-1])):
continue
# If it's a module directory, only append if it contains __init__.py[c].
if os.path.isdir(full_path):
if not (os.path.isfile(os.path.join(full_path, "__init__.py")) or \
(getattr(settings, "SOUTH_USE_PYC", False) and \
os.path.isfile(os.path.join(full_path, "__init__.pyc")))):
continue
filenames.append(f)
filenames.sort()
self.extend(self.migration(f) for f in filenames)
def migration(self, filename):
name = Migration.strip_filename(filename)
if name not in self._cache:
self._cache[name] = Migration(self, name)
return self._cache[name]
def __getitem__(self, value):
if isinstance(value, string_types):
return self.migration(value)
return super(Migrations, self).__getitem__(value)
def _guess_migration(self, prefix):
prefix = Migration.strip_filename(prefix)
matches = [m for m in self if m.name().startswith(prefix)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise exceptions.MultiplePrefixMatches(prefix, matches)
else:
raise exceptions.UnknownMigration(prefix, None)
def guess_migration(self, target_name):
if target_name == 'zero' or not self:
return
elif target_name is None:
return self[-1]
else:
return self._guess_migration(prefix=target_name)
def app_label(self):
return self._application.__name__.split('.')[-1]
def full_name(self):
return self._migrations.__name__
@classmethod
def calculate_dependencies(cls, force=False):
"Goes through all the migrations, and works out the dependencies."
if getattr(cls, "_dependencies_done", False) and not force:
return
for migrations in all_migrations():
for migration in migrations:
migration.calculate_dependencies()
cls._dependencies_done = True
@staticmethod
def invalidate_all_modules():
"Goes through all the migrations, and invalidates all cached modules."
for migrations in all_migrations():
for migration in migrations:
migration.invalidate_module()
def next_filename(self, name):
"Returns the fully-formatted filename of what a new migration 'name' would be"
highest_number = 0
for migration in self:
try:
number = int(migration.name().split("_")[0])
highest_number = max(highest_number, number)
except ValueError:
pass
# Work out the new filename
return "%04i_%s.py" % (
highest_number + 1,
name,
)
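# For example, if the newest migration is "0042_add_field", then
# next_filename("remove_field") returns "0043_remove_field.py".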
class Migration(object):
"""
Class which represents a particular migration file on-disk.
"""
def __init__(self, migrations, filename):
"""
Returns the migration class implied by 'filename'.
"""
self.migrations = migrations
self.filename = filename
self.dependencies = set()
self.dependents = set()
def __str__(self):
return self.app_label() + ':' + self.name()
def __repr__(self):
return '<Migration: %s>' % str(self)
def __eq__(self, other):
return self.app_label() == other.app_label() and self.name() == other.name()
def __hash__(self):
return hash(str(self))
def app_label(self):
return self.migrations.app_label()
@staticmethod
def strip_filename(filename):
return os.path.splitext(os.path.basename(filename))[0]
def name(self):
return self.strip_filename(os.path.basename(self.filename))
def full_name(self):
return self.migrations.full_name() + '.' + self.name()
def migration(self):
"Tries to load the actual migration module"
full_name = self.full_name()
try:
migration = sys.modules[full_name]
except KeyError:
try:
migration = __import__(full_name, {}, {}, ['Migration'])
except ImportError as e:
raise exceptions.UnknownMigration(self, sys.exc_info())
except Exception as e:
raise exceptions.BrokenMigration(self, sys.exc_info())
# Override some imports
migration._ = lambda x: x # Fake i18n
migration.datetime = datetime_utils
return migration
migration = memoize(migration)
def migration_class(self):
"Returns the Migration class from the module"
return self.migration().Migration
def migration_instance(self):
"Instantiates the migration_class"
return self.migration_class()()
migration_instance = memoize(migration_instance)
def previous(self):
"Returns the migration that comes before this one in the sequence."
index = self.migrations.index(self) - 1
if index < 0:
return None
return self.migrations[index]
previous = memoize(previous)
def next(self):
"Returns the migration that comes after this one in the sequence."
index = self.migrations.index(self) + 1
if index >= len(self.migrations):
return None
return self.migrations[index]
next = memoize(next)
def _get_dependency_objects(self, attrname):
"""
Given the name of an attribute (depends_on or needed_by), either yields
a list of migration objects representing it, or errors out.
"""
for app, name in getattr(self.migration_class(), attrname, []):
try:
migrations = Migrations(app)
except ImproperlyConfigured:
raise exceptions.DependsOnUnmigratedApplication(self, app)
migration = migrations.migration(name)
try:
migration.migration()
except exceptions.UnknownMigration:
raise exceptions.DependsOnUnknownMigration(self, migration)
if migration.is_before(self) == False:
raise exceptions.DependsOnHigherMigration(self, migration)
yield migration
def calculate_dependencies(self):
"""
Loads dependency info for this migration, and stores it in itself
and any other relevant migrations.
"""
# Normal deps first
for migration in self._get_dependency_objects("depends_on"):
self.dependencies.add(migration)
migration.dependents.add(self)
# And reverse deps
for migration in self._get_dependency_objects("needed_by"):
self.dependents.add(migration)
migration.dependencies.add(self)
# And implicit ordering deps
previous = self.previous()
if previous:
self.dependencies.add(previous)
previous.dependents.add(self)
def invalidate_module(self):
"""
Removes the cached version of this migration's module import, so we
have to re-import it. Used when south.db.db changes.
"""
reload(self.migration())
self.migration._invalidate()
def forwards(self):
return self.migration_instance().forwards
def backwards(self):
return self.migration_instance().backwards
def forwards_plan(self):
"""
Returns a list of Migration objects to be applied, in order.
This list includes `self`, which will be applied last.
"""
return depends(self, lambda x: x.dependencies)
def _backwards_plan(self):
return depends(self, lambda x: x.dependents)
def backwards_plan(self):
"""
Returns a list of Migration objects to be unapplied, in order.
This list includes `self`, which will be unapplied last.
"""
return list(self._backwards_plan())
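# Illustrative example: with only the implicit ordering dependencies
# 0001 <- 0002 <- 0003, 0003.forwards_plan() returns [0001, 0002, 0003]
# and 0001.backwards_plan() returns [0003, 0002, 0001].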
def is_before(self, other):
if self.migrations == other.migrations:
if self.filename < other.filename:
return True
return False
def is_after(self, other):
if self.migrations == other.migrations:
if self.filename > other.filename:
return True
return False
def prev_orm(self):
if getattr(self.migration_class(), 'symmetrical', False):
return self.orm()
previous = self.previous()
if previous is None:
# First migration? The 'previous ORM' is empty.
return FakeORM(None, self.app_label())
return previous.orm()
prev_orm = memoize(prev_orm)
def orm(self):
return FakeORM(self.migration_class(), self.app_label())
orm = memoize(orm)
def no_dry_run(self):
migration_class = self.migration_class()
try:
return migration_class.no_dry_run
except AttributeError:
return False
|
mpl-2.0
|
overtherain/scriptfile
|
software/googleAppEngine/lib/django_0_96/django/core/servers/fastcgi.py
|
32
|
5506
|
"""
FastCGI server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is an adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""runfcgi:
Run this project as a fastcgi application. To do this, the
flup package from http://www.saddi.com/software/flup/ is
required.
Usage:
django-admin.py runfcgi --settings=yourproject.settings [fcgi settings]
manage.py runfcgi [fcgi settings]
Optional Fcgi settings: (setting=value)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork)
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for webservers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a fastcgi server on a TCP host/port
$ manage.py runfcgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
FASTCGI_OPTIONS = {
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
}
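# Illustrative call (not in the original file): the defaults above are merged
# with "key=value" arguments, e.g.
# runfastcgi(["method=threaded", "host=127.0.0.1", "port=8025", "daemonize=false"])
# runs a threaded FastCGI server bound to 127.0.0.1:8025 in the foreground.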
def fastcgi_help(message=None):
print FASTCGI_HELP
if message:
print message
return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError, e:
print >> sys.stderr, "ERROR: %s" % e
print >> sys.stderr, " Unable to load the flup package. In order to run django"
print >> sys.stderr, " as a FastCGI application, you will need to get flup from"
print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already"
print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH."
return False
if options['method'] in ('prefork', 'fork'):
from flup.server.fcgi_fork import WSGIServer
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
elif options['method'] in ('thread', 'threaded'):
from flup.server.fcgi import WSGIServer
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
wsgi_opts['debug'] = False # Turn off flup tracebacks
# Prep up and go
from django.core.handlers.wsgi import WSGIHandler
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
if daemonize:
from django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"])
if options["pidfile"]:
fp = open(options["pidfile"], "w")
fp.write("%d\n" % os.getpid())
fp.close()
WSGIServer(WSGIHandler(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
|
mit
|
apporc/nova
|
nova/virt/xenapi/volumeops.py
|
17
|
9667
|
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from nova import exception
from nova.i18n import _LI, _LW
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
class VolumeOps(object):
"""Management class for Volume-related tasks."""
def __init__(self, session):
self._session = session
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
"""Attach volume to VM instance."""
# TODO(johngarbutt) move this into _attach_volume_to_vm
dev_number = volume_utils.get_device_number(mountpoint)
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
return self._attach_volume(connection_info, vm_ref,
instance_name, dev_number, hotplug)
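# connection_info is expected to be a dict roughly of the form (sketch):
# {'driver_volume_type': 'iscsi', 'data': {...}}, where 'data' may carry keys
# such as 'vdi_uuid' or 'target_lun' (see _connect_hypervisor_to_volume);
# only the 'iscsi' and 'xensm' driver types are accepted.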
def connect_volume(self, connection_info):
"""Attach volume to hypervisor, but not the VM."""
return self._attach_volume(connection_info)
def _attach_volume(self, connection_info, vm_ref=None, instance_name=None,
dev_number=None, hotplug=False):
self._check_is_supported_driver_type(connection_info)
connection_data = connection_info['data']
sr_ref, sr_uuid = self._connect_to_volume_provider(connection_data,
instance_name)
try:
vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
connection_data)
vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid)
if vm_ref:
self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
dev_number, hotplug)
return (sr_uuid, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
# NOTE(sirp): Forgetting the SR will have the effect of
# cleaning up the VDI and VBD records, so no need to handle
# that explicitly.
volume_utils.forget_sr(self._session, sr_ref)
def _check_is_supported_driver_type(self, connection_info):
driver_type = connection_info['driver_volume_type']
if driver_type not in ['iscsi', 'xensm']:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
def _connect_to_volume_provider(self, connection_data, instance_name):
sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
connection_data, 'Disk-for:%s' % instance_name)
sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if not sr_ref:
# introduce SR because not already present
sr_ref = volume_utils.introduce_sr(
self._session, sr_uuid, sr_label, sr_params)
return (sr_ref, sr_uuid)
def _connect_hypervisor_to_volume(self, sr_ref, connection_data):
# connection_data can have credentials in it so make sure to scrub
# those before logging.
LOG.debug("Connect volume to hypervisor: %s",
strutils.mask_password(connection_data))
if 'vdi_uuid' in connection_data:
vdi_ref = volume_utils.introduce_vdi(
self._session, sr_ref,
vdi_uuid=connection_data['vdi_uuid'])
elif 'target_lun' in connection_data:
vdi_ref = volume_utils.introduce_vdi(
self._session, sr_ref,
target_lun=connection_data['target_lun'])
else:
# NOTE(sirp): This will introduce the first VDI in the SR
vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)
return vdi_ref
def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, dev_number,
hotplug):
LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
{'vdi_ref': vdi_ref, 'vm_ref': vm_ref})
# osvol is added to the vbd so we can spot which vbds are volumes
vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
dev_number, bootable=False,
osvol=True)
if hotplug:
# NOTE(johngarbutt) can only call VBD.plug on a running vm
running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
if running:
LOG.debug("Plugging VBD: %s", vbd_ref)
self._session.VBD.plug(vbd_ref, vm_ref)
LOG.info(_LI('Dev %(dev_number)s attached to'
' instance %(instance_name)s'),
{'instance_name': instance_name, 'dev_number': dev_number})
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
{'instance_name': instance_name, 'mountpoint': mountpoint})
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
device_number = volume_utils.get_device_number(mountpoint)
vbd_ref = volume_utils.find_vbd_by_number(self._session, vm_ref,
device_number)
if vbd_ref is None:
# NOTE(sirp): If we don't find the VBD then it must have been
# detached previously.
LOG.warning(_LW('Skipping detach because VBD for %s was '
'not found'), instance_name)
else:
self._detach_vbds_and_srs(vm_ref, [vbd_ref])
LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance'
' %(instance_name)s'),
{'instance_name': instance_name,
'mountpoint': mountpoint})
def _detach_vbds_and_srs(self, vm_ref, vbd_refs):
is_vm_shutdown = vm_utils.is_vm_shutdown(self._session, vm_ref)
for vbd_ref in vbd_refs:
# find sr before we destroy the vbd
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
if not is_vm_shutdown:
vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref)
vm_utils.destroy_vbd(self._session, vbd_ref)
# Forget (i.e. disconnect) SR only if not in use
volume_utils.purge_sr(self._session, sr_ref)
def detach_all(self, vm_ref):
"""Detach all cinder volumes."""
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
if vbd_refs:
self._detach_vbds_and_srs(vm_ref, vbd_refs)
def _get_all_volume_vbd_refs(self, vm_ref):
"""Return VBD refs for all Nova/Cinder volumes."""
vbd_refs = self._session.VM.get_VBDs(vm_ref)
for vbd_ref in vbd_refs:
other_config = self._session.VBD.get_other_config(vbd_ref)
if other_config.get('osvol'):
yield vbd_ref
def find_bad_volumes(self, vm_ref):
"""Find any volumes with their connection severed.
Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not
work when a VBD is present that points to a non-working volume. To work
around this, we scan for non-working volumes and detach them before
retrying a failed operation.
"""
bad_devices = []
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
for vbd_ref in vbd_refs:
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
try:
# TODO(sirp): bug1152401 This relies on a 120 sec timeout
# within XenServer, update this to fail-fast when this is fixed
# upstream
self._session.SR.scan(sr_ref)
except self._session.XenAPI.Failure as exc:
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
device = self._session.VBD.get_device(vbd_ref)
bad_devices.append('/dev/%s' % device)
else:
raise
return bad_devices
def safe_cleanup_from_vdis(self, vdi_refs):
# A helper method to detach volumes that are not associated with an
# instance
for vdi_ref in vdi_refs:
try:
sr_ref = volume_utils.find_sr_from_vdi(self._session, vdi_ref)
except exception.StorageError as exc:
LOG.debug(exc.format_message())
continue
try:
# Forget (i.e. disconnect) SR only if not in use
volume_utils.purge_sr(self._session, sr_ref)
except Exception:
LOG.debug('Ignoring error while purging sr: %s' % sr_ref,
exc_info=True)
|
apache-2.0
|
TNosredna/CouchPotatoServer
|
libs/tornado/process.py
|
18
|
10100
|
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with multiple processes, including both forking
the server into multiple processes and managing subprocesses.
"""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import multiprocessing
import os
import signal
import subprocess
import sys
import time
from binascii import hexlify
from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log
from tornado.platform.auto import set_close_exec
from tornado import stack_context
try:
long # py2
except NameError:
long = int # py3
def cpu_count():
"""Returns the number of processors on this machine."""
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
try:
return os.sysconf("SC_NPROCESSORS_CONF")
except ValueError:
pass
gen_log.error("Could not detect number of processors; assuming 1")
return 1
def _reseed_random():
if 'random' not in sys.modules:
return
import random
# If os.urandom is available, this method does the same thing as
# random.seed (at least as of python 2.6). If os.urandom is not
# available, we mix in the pid in addition to a timestamp.
try:
seed = long(hexlify(os.urandom(16)), 16)
except NotImplementedError:
seed = int(time.time() * 1000) ^ os.getpid()
random.seed(seed)
def _pipe_cloexec():
r, w = os.pipe()
set_close_exec(r)
set_close_exec(w)
return r, w
_task_id = None
def fork_processes(num_processes, max_restarts=100):
"""Starts multiple worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the debug=True option to `tornado.web.Application`).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
if ioloop.IOLoop.initialized():
raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start_processes()")
gen_log.info("Starting %d processes", num_processes)
children = {}
def start_child(i):
pid = os.fork()
if pid == 0:
# child process
_reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None:
return id
num_restarts = 0
while children:
try:
pid, status = os.wait()
except OSError as e:
if e.errno == errno.EINTR:
continue
raise
if pid not in children:
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
id, pid, os.WTERMSIG(status))
elif os.WEXITSTATUS(status) != 0:
gen_log.warning("child %d (pid %d) exited with status %d, restarting",
id, pid, os.WEXITSTATUS(status))
else:
gen_log.info("child %d (pid %d) exited normally", id, pid)
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another IOLoop
# unless the caller checks the return value).
sys.exit(0)
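# Illustrative usage sketch (not part of this module): a typical multi-process
# server binds its sockets before forking and creates the IOLoop afterwards:
# sockets = tornado.netutil.bind_sockets(8888)
# tornado.process.fork_processes(0)           # one child per CPU core
# server = tornado.httpserver.HTTPServer(app) # 'app' is your Application
# server.add_sockets(sockets)
# tornado.ioloop.IOLoop.instance().start()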
def task_id():
"""Returns the current task id, if any.
Returns None if this process was not created by `fork_processes`.
"""
global _task_id
return _task_id
class Subprocess(object):
"""Wraps ``subprocess.Popen`` with IOStream support.
The constructor is the same as ``subprocess.Popen`` with the following
additions:
* ``stdin``, ``stdout``, and ``stderr`` may have the value
``tornado.process.Subprocess.STREAM``, which will make the corresponding
attribute of the resulting Subprocess a `.PipeIOStream`.
* A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
"""
STREAM = object()
_initialized = False
_waiting = {}
def __init__(self, *args, **kwargs):
self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
to_close = []
if kwargs.get('stdin') is Subprocess.STREAM:
in_r, in_w = _pipe_cloexec()
kwargs['stdin'] = in_r
to_close.append(in_r)
self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
if kwargs.get('stdout') is Subprocess.STREAM:
out_r, out_w = _pipe_cloexec()
kwargs['stdout'] = out_w
to_close.append(out_w)
self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
if kwargs.get('stderr') is Subprocess.STREAM:
err_r, err_w = _pipe_cloexec()
kwargs['stderr'] = err_w
to_close.append(err_w)
self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
self.proc = subprocess.Popen(*args, **kwargs)
for fd in to_close:
os.close(fd)
for attr in ['stdin', 'stdout', 'stderr', 'pid']:
if not hasattr(self, attr): # don't clobber streams set above
setattr(self, attr, getattr(self.proc, attr))
self._exit_callback = None
self.returncode = None
def set_exit_callback(self, callback):
"""Runs ``callback`` when this process exits.
The callback takes one argument, the return code of the process.
This method uses a ``SIGCHLD`` handler, which is a global setting
and may conflict if you have other libraries trying to handle the
same signal. If you are using more than one ``IOLoop`` it may
be necessary to call `Subprocess.initialize` first to designate
one ``IOLoop`` to run the signal handlers.
In many cases a close callback on the stdout or stderr streams
can be used as an alternative to an exit callback if the
signal handler is causing a problem.
"""
self._exit_callback = stack_context.wrap(callback)
Subprocess.initialize(self.io_loop)
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
@classmethod
def initialize(cls, io_loop=None):
"""Initializes the ``SIGCHILD`` handler.
The signal handler is run on an `.IOLoop` to avoid locking issues.
Note that the `.IOLoop` used for signal handling need not be the
same one used by individual Subprocess objects (as long as the
``IOLoops`` are each running in separate threads).
"""
if cls._initialized:
return
if io_loop is None:
io_loop = ioloop.IOLoop.current()
cls._old_sigchld = signal.signal(
signal.SIGCHLD,
lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
cls._initialized = True
@classmethod
def uninitialize(cls):
"""Removes the ``SIGCHILD`` handler."""
if not cls._initialized:
return
signal.signal(signal.SIGCHLD, cls._old_sigchld)
cls._initialized = False
@classmethod
def _cleanup(cls):
for pid in list(cls._waiting.keys()): # make a copy
cls._try_cleanup_process(pid)
@classmethod
def _try_cleanup_process(cls, pid):
try:
ret_pid, status = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.args[0] == errno.ECHILD:
return
if ret_pid == 0:
return
assert ret_pid == pid
subproc = cls._waiting.pop(pid)
subproc.io_loop.add_callback_from_signal(
subproc._set_returncode, status)
def _set_returncode(self, status):
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
assert os.WIFEXITED(status)
self.returncode = os.WEXITSTATUS(status)
if self._exit_callback:
callback = self._exit_callback
self._exit_callback = None
callback(self.returncode)
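# Illustrative usage sketch (not part of the original module):
# sub = Subprocess(["tail", "-f", "/var/log/syslog"], stdout=Subprocess.STREAM)
# sub.set_exit_callback(lambda code: gen_log.info("child exited: %d", code))
# sub.stdout.read_until(b"\n", callback=handle_line)  # handle_line: hypothetical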
|
gpl-3.0
|
dwlehman/blivet
|
blivet/errors.py
|
2
|
4729
|
# errors.py
# Exception classes for anaconda's storage configuration module.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <[email protected]>
#
from .i18n import N_
class StorageError(Exception):
def __init__(self, *args, **kwargs):
self.hardware_fault = kwargs.pop("hardware_fault", False)
super(StorageError, self).__init__(*args, **kwargs)
class NoDisksError(StorageError):
pass
# Device
class DeviceError(StorageError):
pass
class DeviceCreateError(DeviceError):
pass
class DeviceDestroyError(DeviceError):
pass
class DeviceResizeError(DeviceError):
pass
class DeviceSetupError(DeviceError):
pass
class DeviceTeardownError(DeviceError):
pass
class DeviceUserDeniedFormatError(DeviceError):
pass
# DeviceFormat
class DeviceFormatError(StorageError):
pass
class FormatCreateError(DeviceFormatError):
pass
class FormatDestroyError(DeviceFormatError):
pass
class FormatSetupError(DeviceFormatError):
pass
class FormatTeardownError(DeviceFormatError):
pass
class DMRaidMemberError(DeviceFormatError):
pass
class MultipathMemberError(DeviceFormatError):
pass
class FSError(DeviceFormatError):
pass
class FSResizeError(FSError):
def __init__(self, message, details):
FSError.__init__(self, message)
self.details = details
class LUKSError(DeviceFormatError):
pass
class MDMemberError(DeviceFormatError):
pass
class PhysicalVolumeError(DeviceFormatError):
pass
class SinglePhysicalVolumeError(DeviceFormatError):
pass
class SwapSpaceError(DeviceFormatError):
pass
class DiskLabelError(DeviceFormatError):
pass
class InvalidDiskLabelError(DiskLabelError):
pass
class DiskLabelCommitError(DiskLabelError):
pass
# devicelibs
class RaidError(StorageError):
pass
class DMError(StorageError):
pass
class MPathError(StorageError):
pass
class BTRFSError(StorageError):
pass
class BTRFSValueError(BTRFSError, ValueError):
pass
# DeviceTree
class DeviceTreeError(StorageError):
pass
class DeviceNotFoundError(StorageError):
pass
class UnusableConfigurationError(StorageError):
""" User has an unusable initial storage configuration. """
suggestion = ""
class DiskLabelScanError(UnusableConfigurationError):
suggestion = N_("For some reason we were unable to locate a disklabel on a "
"disk that the kernel is reporting partitions on. It is "
"unclear what the exact problem is. Please file a bug at "
"http://bugzilla.redhat.com")
class CorruptGPTError(UnusableConfigurationError):
suggestion = N_("Either restore the disklabel to a completely working "
"state or remove it completely.\n"
"Hint: parted can restore it or wipefs can remove it.")
class DuplicateVGError(UnusableConfigurationError):
suggestion = N_("Rename one of the volume groups so the names are "
"distinct.\n"
"Hint 1: vgrename accepts UUID in place of the old name.\n"
"Hint 2: You can get the VG UUIDs by running "
"'pvs -o +vg_uuid'.")
# DeviceAction
class DeviceActionError(StorageError):
pass
# partitioning
class PartitioningError(StorageError):
pass
class NotEnoughFreeSpaceError(StorageError):
pass
# udev
class UdevError(StorageError):
pass
# fstab
class UnrecognizedFSTabEntryError(StorageError):
pass
class FSTabTypeMismatchError(StorageError):
pass
# dasd
class DasdFormatError(StorageError):
pass
# size
class SizePlacesError(StorageError):
pass
# probing
class UnknownSourceDeviceError(StorageError):
pass
# factories
class DeviceFactoryError(StorageError):
pass
|
lgpl-2.1
|
gram526/VTK
|
ThirdParty/Twisted/twisted/internet/wxreactor.py
|
80
|
5221
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides wxPython event loop support for Twisted.
In order to use this support, simply do the following::
| from twisted.internet import wxreactor
| wxreactor.install()
Then, when your root wxApp has been created::
| from twisted.internet import reactor
| reactor.registerWxApp(yourApp)
| reactor.run()
Then use twisted.internet APIs as usual. Stop the event loop using
reactor.stop(), not yourApp.ExitMainLoop().
IMPORTANT: tests will fail when run under this reactor. This is
expected and probably does not reflect on the reactor's ability to run
real applications.
"""
import Queue
try:
from wx import PySimpleApp as wxPySimpleApp, CallAfter as wxCallAfter, \
Timer as wxTimer
except ImportError:
# older version of wxPython:
from wxPython.wx import wxPySimpleApp, wxCallAfter, wxTimer
from twisted.python import log, runtime
from twisted.internet import _threadedselect
class ProcessEventsTimer(wxTimer):
"""
Timer that tells wx to process pending events.
This is necessary on OS X, probably due to a bug in wx, if we want
wxCallAfters to be handled when modal dialogs, menus, etc. are open.
"""
def __init__(self, wxapp):
wxTimer.__init__(self)
self.wxapp = wxapp
def Notify(self):
"""
Called repeatedly by wx event loop.
"""
self.wxapp.ProcessPendingEvents()
class WxReactor(_threadedselect.ThreadedSelectReactor):
"""
wxPython reactor.
wxPython drives the event loop, select() runs in a thread.
"""
_stopping = False
def registerWxApp(self, wxapp):
"""
Register wxApp instance with the reactor.
"""
self.wxapp = wxapp
def _installSignalHandlersAgain(self):
"""
wx sometimes removes our own signal handlers, so re-add them.
"""
try:
# make _handleSignals happy:
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
except ImportError:
return
self._handleSignals()
def stop(self):
"""
Stop the reactor.
"""
if self._stopping:
return
self._stopping = True
_threadedselect.ThreadedSelectReactor.stop(self)
def _runInMainThread(self, f):
"""
Schedule function to run in main wx/Twisted thread.
Called by the select() thread.
"""
if hasattr(self, "wxapp"):
wxCallAfter(f)
else:
# wx shutdown but twisted hasn't
self._postQueue.put(f)
def _stopWx(self):
"""
Stop the wx event loop if it hasn't already been stopped.
Called during Twisted event loop shutdown.
"""
if hasattr(self, "wxapp"):
self.wxapp.ExitMainLoop()
def run(self, installSignalHandlers=True):
"""
Start the reactor.
"""
self._postQueue = Queue.Queue()
if not hasattr(self, "wxapp"):
log.msg("registerWxApp() was not called on reactor, "
"registering my own wxApp instance.")
self.registerWxApp(wxPySimpleApp())
# start select() thread:
self.interleave(self._runInMainThread,
installSignalHandlers=installSignalHandlers)
if installSignalHandlers:
self.callLater(0, self._installSignalHandlersAgain)
# add cleanup events:
self.addSystemEventTrigger("after", "shutdown", self._stopWx)
self.addSystemEventTrigger("after", "shutdown",
lambda: self._postQueue.put(None))
# On Mac OS X, work around wx bug by starting timer to ensure
# wxCallAfter calls are always processed. We don't wake up as
# often as we could since that uses too much CPU.
if runtime.platform.isMacOSX():
t = ProcessEventsTimer(self.wxapp)
t.Start(2) # wake up every 2ms
self.wxapp.MainLoop()
wxapp = self.wxapp
del self.wxapp
if not self._stopping:
# wx event loop exited without reactor.stop() being
# called. At this point events from select() thread will
# be added to _postQueue, but some may still be waiting
# unprocessed in wx, thus the ProcessPendingEvents()
# below.
self.stop()
wxapp.ProcessPendingEvents() # deal with any queued wxCallAfters
while 1:
try:
f = self._postQueue.get(timeout=0.01)
except Queue.Empty:
continue
else:
if f is None:
break
try:
f()
except:
log.err()
def install():
"""
Configure the twisted mainloop to be run inside the wxPython mainloop.
"""
reactor = WxReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
__all__ = ['install']
|
bsd-3-clause
|
marissazhou/django
|
django/conf/locale/mk/formats.py
|
504
|
1742
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
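# Illustrative example (not in the original file): with the separators above and
# USE_THOUSAND_SEPARATOR enabled, 1234567.89 is rendered as "1.234.567,89".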
|
bsd-3-clause
|
popazerty/EG-2
|
lib/python/Components/Converter/ServiceInfo.py
|
7
|
9467
|
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService
from Components.Element import cached
from os import path
WIDESCREEN = [3, 4, 7, 8, 0xB, 0xC, 0xF, 0x10]
class ServiceInfo(Converter, object):
HAS_TELETEXT = 1
IS_MULTICHANNEL = 2
AUDIO_STEREO = 3
IS_CRYPTED = 4
IS_WIDESCREEN = 5
IS_NOT_WIDESCREEN = 6
SUBSERVICES_AVAILABLE = 7
XRES = 8
YRES = 9
APID = 10
VPID = 11
PCRPID = 12
PMTPID = 13
TXTPID = 14
TSID = 15
ONID = 16
SID = 17
FRAMERATE = 18
TRANSFERBPS = 19
HAS_HBBTV = 20
AUDIOTRACKS_AVAILABLE = 21
SUBTITLES_AVAILABLE = 22
EDITMODE = 23
IS_STREAM = 24
IS_SD = 25
IS_HD = 26
IS_1080 = 27
IS_720 = 28
IS_576 = 29
IS_480 = 30
def __init__(self, type):
Converter.__init__(self, type)
self.type, self.interesting_events = {
"HasTelext": (self.HAS_TELETEXT, (iPlayableService.evUpdatedInfo,)),
"IsMultichannel": (self.IS_MULTICHANNEL, (iPlayableService.evUpdatedInfo,)),
"IsStereo": (self.AUDIO_STEREO, (iPlayableService.evUpdatedInfo,)),
"IsCrypted": (self.IS_CRYPTED, (iPlayableService.evUpdatedInfo,)),
"IsWidescreen": (self.IS_WIDESCREEN, (iPlayableService.evVideoSizeChanged,)),
"IsNotWidescreen": (self.IS_NOT_WIDESCREEN, (iPlayableService.evVideoSizeChanged,)),
"SubservicesAvailable": (self.SUBSERVICES_AVAILABLE, (iPlayableService.evUpdatedEventInfo,)),
"VideoWidth": (self.XRES, (iPlayableService.evVideoSizeChanged,)),
"VideoHeight": (self.YRES, (iPlayableService.evVideoSizeChanged,)),
"AudioPid": (self.APID, (iPlayableService.evUpdatedInfo,)),
"VideoPid": (self.VPID, (iPlayableService.evUpdatedInfo,)),
"PcrPid": (self.PCRPID, (iPlayableService.evUpdatedInfo,)),
"PmtPid": (self.PMTPID, (iPlayableService.evUpdatedInfo,)),
"TxtPid": (self.TXTPID, (iPlayableService.evUpdatedInfo,)),
"TsId": (self.TSID, (iPlayableService.evUpdatedInfo,)),
"OnId": (self.ONID, (iPlayableService.evUpdatedInfo,)),
"Sid": (self.SID, (iPlayableService.evUpdatedInfo,)),
"Framerate": (self.FRAMERATE, (iPlayableService.evVideoSizeChanged,iPlayableService.evUpdatedInfo,)),
"TransferBPS": (self.TRANSFERBPS, (iPlayableService.evUpdatedInfo,)),
"HasHBBTV": (self.HAS_HBBTV, (iPlayableService.evUpdatedInfo,iPlayableService.evHBBTVInfo,)),
"AudioTracksAvailable": (self.AUDIOTRACKS_AVAILABLE, (iPlayableService.evUpdatedInfo,)),
"SubtitlesAvailable": (self.SUBTITLES_AVAILABLE, (iPlayableService.evUpdatedInfo,)),
"Editmode": (self.EDITMODE, (iPlayableService.evUpdatedInfo,)),
"IsStream": (self.IS_STREAM, (iPlayableService.evUpdatedInfo,)),
"IsSD": (self.IS_SD, (iPlayableService.evVideoSizeChanged,)),
"IsHD": (self.IS_HD, (iPlayableService.evVideoSizeChanged,)),
"Is1080": (self.IS_1080, (iPlayableService.evVideoSizeChanged,)),
"Is720": (self.IS_720, (iPlayableService.evVideoSizeChanged,)),
"Is576": (self.IS_576, (iPlayableService.evVideoSizeChanged,)),
"Is480": (self.IS_480, (iPlayableService.evVideoSizeChanged,)),
}[type]
def getServiceInfoString(self, info, what, convert = lambda x: "%d" % x):
v = info.getInfo(what)
if v == -1:
return "N/A"
if v == -2:
return info.getInfoString(what)
return convert(v)
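# For example, getServiceInfoString(info, iServiceInformation.sVideoWidth)
# returns a string such as "1280", or "N/A" when the service reports no value.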
@cached
def getBoolean(self):
service = self.source.service
info = service and service.info()
if not info:
return False
video_height = None
video_aspect = None
try:
f = open("/proc/stb/vmpeg/0/yres", "r")
video_height = int(f.read(),16)
f.close()
except:
video_height = int(info.getInfo(iServiceInformation.sVideoHeight))
video_aspect = info.getInfo(iServiceInformation.sAspect)
if self.type == self.HAS_TELETEXT:
tpid = info.getInfo(iServiceInformation.sTXTPID)
return tpid != -1
elif self.type in (self.IS_MULTICHANNEL, self.AUDIO_STEREO):
# FIXME. but currently iAudioTrackInfo doesn't provide more information.
audio = service.audioTracks()
if audio:
n = audio.getNumberOfTracks()
idx = 0
while idx < n:
i = audio.getTrackInfo(idx)
description = i.getDescription()
if description in ("AC3", "AC-3", "DTS"):
if self.type == self.IS_MULTICHANNEL:
return True
elif self.type == self.AUDIO_STEREO:
return False
idx += 1
if self.type == self.IS_MULTICHANNEL:
return False
elif self.type == self.AUDIO_STEREO:
return True
return False
elif self.type == self.IS_CRYPTED:
return info.getInfo(iServiceInformation.sIsCrypted) == 1
elif self.type == self.IS_WIDESCREEN:
return video_aspect in WIDESCREEN
elif self.type == self.IS_NOT_WIDESCREEN:
return video_aspect not in WIDESCREEN
elif self.type == self.SUBSERVICES_AVAILABLE:
subservices = service.subServices()
return subservices and subservices.getNumberOfSubservices() > 0
elif self.type == self.HAS_HBBTV:
return info.getInfoString(iServiceInformation.sHBBTVUrl) != ""
elif self.type == self.AUDIOTRACKS_AVAILABLE:
audio = service.audioTracks()
return audio and audio.getNumberOfTracks() > 1
elif self.type == self.SUBTITLES_AVAILABLE:
subtitle = service and service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if subtitlelist:
return len(subtitlelist) > 0
return False
elif self.type == self.EDITMODE:
return hasattr(self.source, "editmode") and not not self.source.editmode
elif self.type == self.IS_STREAM:
return service.streamed() is not None
elif self.type == self.IS_SD:
return video_height < 720
elif self.type == self.IS_HD:
return video_height >= 720
elif self.type == self.IS_1080:
return video_height > 1000 and video_height <= 1080
elif self.type == self.IS_720:
return video_height > 700 and video_height <= 720
elif self.type == self.IS_576:
return video_height > 500 and video_height <= 576
elif self.type == self.IS_480:
return video_height > 0 and video_height <= 480
return False
boolean = property(getBoolean)
@cached
def getText(self):
service = self.source.service
info = service and service.info()
if not info:
return ""
if self.type == self.XRES:
video_width = None
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
video_width = int(f.read(),16)
f.close()
if not video_width:
video_width = int(self.getServiceInfoString(info, iServiceInformation.sVideoWidth))
return "%d" % video_width
elif self.type == self.YRES:
video_height = None
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
video_height = int(f.read(),16)
f.close()
if not video_height:
video_height = int(self.getServiceInfoString(info, iServiceInformation.sVideoHeight))
return "%d" % video_height
elif self.type == self.APID:
return self.getServiceInfoString(info, iServiceInformation.sAudioPID)
elif self.type == self.VPID:
return self.getServiceInfoString(info, iServiceInformation.sVideoPID)
elif self.type == self.PCRPID:
return self.getServiceInfoString(info, iServiceInformation.sPCRPID)
elif self.type == self.PMTPID:
return self.getServiceInfoString(info, iServiceInformation.sPMTPID)
elif self.type == self.TXTPID:
return self.getServiceInfoString(info, iServiceInformation.sTXTPID)
elif self.type == self.TSID:
return self.getServiceInfoString(info, iServiceInformation.sTSID)
elif self.type == self.ONID:
return self.getServiceInfoString(info, iServiceInformation.sONID)
elif self.type == self.SID:
return self.getServiceInfoString(info, iServiceInformation.sSID)
elif self.type == self.FRAMERATE:
video_rate = None
if path.exists("/proc/stb/vmpeg/0/framerate"):
f = open("/proc/stb/vmpeg/0/framerate", "r")
video_rate = int(f.read())
f.close()
if not video_rate:
video_rate = int(self.getServiceInfoString(info, iServiceInformation.sFrameRate))
			return "%d fps" % ((video_rate + 500) / 1000)
elif self.type == self.TRANSFERBPS:
return self.getServiceInfoString(info, iServiceInformation.sTransferBPS, lambda x: "%d kB/s" % (x/1024))
elif self.type == self.HAS_HBBTV:
return info.getInfoString(iServiceInformation.sHBBTVUrl)
return ""
text = property(getText)
@cached
def getValue(self):
service = self.source.service
info = service and service.info()
if not info:
return -1
if self.type == self.XRES:
video_width = None
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
video_width = int(f.read(),16)
f.close()
if not video_width:
video_width = info.getInfo(iServiceInformation.sVideoWidth)
return str(video_width)
elif self.type == self.YRES:
video_height = None
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
video_height = int(f.read(),16)
f.close()
if not video_height:
video_height = info.getInfo(iServiceInformation.sVideoHeight)
return str(video_height)
elif self.type == self.FRAMERATE:
video_rate = None
if path.exists("/proc/stb/vmpeg/0/framerate"):
f = open("/proc/stb/vmpeg/0/framerate", "r")
video_rate = f.read()
f.close()
if not video_rate:
video_rate = info.getInfo(iServiceInformation.sFrameRate)
return str(video_rate)
return -1
value = property(getValue)
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
Converter.changed(self, what)
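# Illustrative sketch (not part of the original converter): the xres/yres
# entries under /proc/stb/vmpeg/0/ are plain-text hexadecimal numbers (hence
# int(value, 16)), while the framerate entry appears to hold a decimal value
# in millihertz. The sample values below are hypothetical and only show the
# parsing; no receiver hardware is needed to run this check.
def _demo_proc_value_parsing():
	yres_raw = "2d0\n"         # hypothetical content of /proc/stb/vmpeg/0/yres
	framerate_raw = "50000\n"  # hypothetical content of /proc/stb/vmpeg/0/framerate
	video_height = int(yres_raw, 16)
	video_rate = int(framerate_raw)
	assert video_height == 720
	assert "%d fps" % ((video_rate + 500) / 1000) == "50 fps"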
|
gpl-2.0
|
SickGear/SickGear
|
lib/urllib3/packages/ssl_match_hostname/_implementation.py
|
48
|
5679
|
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
import sys
# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
# system, use it to handle IPAddress ServerAltnames (this was added in
# python-3.5) otherwise only do DNS matching. This allows
# backports.ssl_match_hostname to continue to be used in Python 2.7.
try:
import ipaddress
except ImportError:
ipaddress = None
__version__ = "3.5.0.1"
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r".")
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count("*")
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn)
)
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == "*":
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append("[^.]+")
elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
return pat.match(hostname)
def _to_unicode(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding="ascii", errors="strict")
return obj
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError(
"empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED"
)
try:
# Divergence from upstream: ipaddress can't handle byte str
host_ip = ipaddress.ip_address(_to_unicode(hostname))
except ValueError:
# Not an IP address (common case)
host_ip = None
except UnicodeError:
# Divergence from upstream: Have to deal with ipaddress not taking
# byte strings. addresses should be all ascii, so we consider it not
# an ipaddress in this case
host_ip = None
except AttributeError:
# Divergence from upstream: Make ipaddress library optional
if ipaddress is None:
host_ip = None
else:
raise
dnsnames = []
san = cert.get("subjectAltName", ())
for key, value in san:
if key == "DNS":
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == "IP Address":
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get("subject", ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == "commonName":
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError(
"hostname %r "
"doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
)
elif len(dnsnames) == 1:
raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
else:
raise CertificateError(
"no appropriate commonName or subjectAltName fields were found"
)
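# Illustrative sketch (not part of the upstream module): a minimal
# demonstration of the behaviour documented above, using a hand-built dict
# shaped like SSLSocket.getpeercert() output. The host names and certificate
# entries are made up for illustration only.
def _demo_match_hostname():
    cert = {
        "subjectAltName": (
            ("DNS", "example.com"),
            ("DNS", "*.example.org"),
        )
    }
    # an exact dNSName match and a left-most-label wildcard match both succeed
    assert match_hostname(cert, "example.com") is None
    assert match_hostname(cert, "www.example.org") is None
    # a hostname matching no entry raises CertificateError
    try:
        match_hostname(cert, "www.example.net")
    except CertificateError:
        pass
    else:
        raise AssertionError("expected CertificateError")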
|
gpl-3.0
|
salfab/CouchPotatoServer
|
libs/sqlalchemy/connectors/mxodbc.py
|
18
|
5517
|
# connectors/mxodbc.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide an SQLAlchemy connector for the eGenix mxODBC commercial
Python adapter for ODBC. This is not a free product, but eGenix
provides SQLAlchemy with a license for use in continuous integration
testing.
This has been tested for use with mxODBC 3.1.2 on SQL Server 2005
and 2008, using the SQL Server Native driver. However, it is
possible for this to be used on other database platforms.
For more info on mxODBC, see http://www.egenix.com/
"""
import sys
import re
import warnings
from sqlalchemy.connectors import Connector
class MxODBCConnector(Connector):
driver='mxodbc'
supports_sane_multi_rowcount = False
supports_unicode_statements = False
supports_unicode_binds = False
supports_native_decimal = True
@classmethod
def dbapi(cls):
# this classmethod will normally be replaced by an instance
# attribute of the same name, so this is normally only called once.
cls._load_mx_exceptions()
platform = sys.platform
if platform == 'win32':
from mx.ODBC import Windows as module
# this can be the string "linux2", and possibly others
elif 'linux' in platform:
from mx.ODBC import unixODBC as module
elif platform == 'darwin':
from mx.ODBC import iODBC as module
else:
raise ImportError, "Unrecognized platform for mxODBC import"
return module
@classmethod
def _load_mx_exceptions(cls):
""" Import mxODBC exception classes into the module namespace,
as if they had been imported normally. This is done here
to avoid requiring all SQLAlchemy users to install mxODBC.
"""
global InterfaceError, ProgrammingError
from mx.ODBC import InterfaceError
from mx.ODBC import ProgrammingError
def on_connect(self):
def connect(conn):
conn.stringformat = self.dbapi.MIXED_STRINGFORMAT
conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT
conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT
conn.errorhandler = self._error_handler()
return connect
def _error_handler(self):
""" Return a handler that adjusts mxODBC's raised Warnings to
emit Python standard warnings.
"""
from mx.ODBC.Error import Warning as MxOdbcWarning
def error_handler(connection, cursor, errorclass, errorvalue):
if issubclass(errorclass, MxOdbcWarning):
errorclass.__bases__ = (Warning,)
warnings.warn(message=str(errorvalue),
category=errorclass,
stacklevel=2)
else:
raise errorclass, errorvalue
return error_handler
def create_connect_args(self, url):
""" Return a tuple of *args,**kwargs for creating a connection.
The mxODBC 3.x connection constructor looks like this:
connect(dsn, user='', password='',
clear_auto_commit=1, errorhandler=None)
This method translates the values in the provided uri
into args and kwargs needed to instantiate an mxODBC Connection.
The arg 'errorhandler' is not used by SQLAlchemy and will
not be populated.
"""
opts = url.translate_connect_args(username='user')
opts.update(url.query)
args = opts.pop('host')
opts.pop('port', None)
opts.pop('database', None)
return (args,), opts
def is_disconnect(self, e, connection, cursor):
# TODO: eGenix recommends checking connection.closed here
# Does that detect dropped connections ?
if isinstance(e, self.dbapi.ProgrammingError):
return "connection already closed" in str(e)
elif isinstance(e, self.dbapi.Error):
return '[08S01]' in str(e)
else:
return False
def _get_server_version_info(self, connection):
# eGenix suggests using conn.dbms_version instead
# of what we're doing here
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
# 18 == pyodbc.SQL_DBMS_VER
for n in r.split(dbapi_con.getinfo(18)[1]):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def do_execute(self, cursor, statement, parameters, context=None):
if context:
native_odbc_execute = context.execution_options.\
get('native_odbc_execute', 'auto')
if native_odbc_execute is True:
# user specified native_odbc_execute=True
cursor.execute(statement, parameters)
elif native_odbc_execute is False:
# user specified native_odbc_execute=False
cursor.executedirect(statement, parameters)
elif context.is_crud:
# statement is UPDATE, DELETE, INSERT
cursor.execute(statement, parameters)
else:
# all other statements
cursor.executedirect(statement, parameters)
else:
cursor.executedirect(statement, parameters)
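# Illustrative sketch (not part of SQLAlchemy): _get_server_version_info()
# above splits the DBMS version string returned by getinfo(18) on '.' and '-'
# and casts the numeric pieces to int. The snippet below reproduces only that
# parsing step with a made-up version string, so it runs without the
# commercial mxODBC driver installed.
def _demo_version_parsing():
    pieces = []
    for n in re.compile(r'[.\-]').split("10.50.2500.0"):
        try:
            pieces.append(int(n))
        except ValueError:
            pieces.append(n)
    assert tuple(pieces) == (10, 50, 2500, 0)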
|
gpl-3.0
|
1kastner/analyse_weather_data
|
gather_weather_data/wunderground/summarize_raw_airport_data.py
|
1
|
8898
|
"""
Summarize all downloaded airport weather station data files.
Uses UTC time zone.
Use
-m gather_weather_data.wunderground.summarize_raw_airport_data
to run the demo
"""
import os
import json
import datetime
import logging
import numpy
import pandas
import metar.Metar # needs https://github.com/tomp/python-metar/pull/25 to work stable
from . import WUNDERGROUND_RAW_AIRPORT_DATA_DIR
from . import PROCESSED_DATA_DIR
from .summarize_raw_data import _parse_utc_date
from .summarize_raw_data import _cast_number
from .summarize_raw_data import _get_file_name
HEADER_FORMAT = ("{datetime},{temperature},{dewpoint},{windspeed},{windgust},{winddirection},{pressure},{humidity},"
"{precipitation},{cloudcover}")
def _get_header():
"""
:return: Formatted header complying csv standards
"""
return HEADER_FORMAT.replace("{", "").replace("}", "")
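# Illustrative sketch (not part of the original module): _get_header() simply
# strips the braces from HEADER_FORMAT, and each observation row is produced
# by HEADER_FORMAT.format(**observation) further below. The toy observation
# here uses made-up values purely to show the shape of one CSV row.
def _demo_header_and_row():
    assert _get_header().startswith("datetime,temperature,dewpoint,")
    observation = {
        "datetime": "2016-01-01T12:00:00",
        "temperature": 3.2,
        "dewpoint": 1.0,
        "windspeed": 12,
        "windgust": "",
        "winddirection": 270,
        "pressure": 1013,
        "humidity": 87,
        "precipitation": "",
        "cloudcover": "BKN",
    }
    assert HEADER_FORMAT.format(**observation) == \
        "2016-01-01T12:00:00,3.2,1.0,12,,270,1013,87,,BKN"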
def max_of_total_order(collection_of_interest, given_total_order):
"""
:param collection_of_interest: Find the maximum in this collection
:param given_total_order: Describe the total order to use on the collection
:return: max element
"""
l = [given_total_order.index(e) for e in collection_of_interest]
return given_total_order[max(l)]
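# Illustrative sketch (not part of the original module): max_of_total_order()
# returns the element whose position in the given order list is largest;
# get_cloud_cover() below uses it to reduce several METAR sky groups to the
# most significant one. The cover codes are real METAR abbreviations, the
# combination is made up.
def _demo_max_of_total_order():
    order = ["SKC", "FEW", "SCT", "BKN", "OVC"]
    assert max_of_total_order(["FEW", "BKN", "SCT"], order) == "BKN"
    assert max_of_total_order(["SKC"], order) == "SKC"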
def get_cloud_cover(metar_string, date_of_observation):
"""
This needs a small modification as described in https://github.com/tomp/python-metar/pull/25
:param metar_string: A classical meteorological METAR
:param date_of_observation: Used to parse the metar at hand
:return: The cloud cover name
"""
d = date_of_observation
m = metar.Metar.Metar(
metar_string,
d.month,
d.year,
drop_unsupported_observations=True
)
cloud_cover = "CAVOC" # 0 octas
if not m.sky:
return cloud_cover
else:
sorted_possible_cloud_covers = [
"SKC", "CLR", "NSC", # 0 octas
"FEW", # 1-2 octas
"SCT", # 3-4 octas
"BKN", # 5-7 octas
"OVC", # 8 octas
"VV", # clouds can not be seen because of fog or rain
]
sky_covers = [cover for (cover, height, cloud) in m.sky]
return max_of_total_order(sky_covers, sorted_possible_cloud_covers)
def _get_data_for_single_day(station, day):
"""
At the current time the day provided is interpreted as local time at wunderground.
:param station: The name of the station, e.g. 'IHAMBURG69'
:param day: The day to pick the json from
:return: A valid csv file content with header
:rtype: str
"""
json_file_name = _get_file_name(station, day, 'json')
json_file_path = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, json_file_name)
if not os.path.isfile(json_file_path):
        # search for files from another project
json_file_name = station + "_" + day.strftime("%Y%m%d") + ".json"
json_file_path = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, json_file_name)
if not os.path.isfile(json_file_path):
# search for files created by yet another project
json_file_name = day.strftime("%Y-%m-%d") + ".json"
json_file_path = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, json_file_name)
if not os.path.isfile(json_file_path):
logging.warning("missing input file: " + json_file_path)
return
if os.path.getsize(json_file_path) == 0:
logging.warning("encountered an empty file: ", json_file_path)
os.remove(json_file_path)
return
with open(json_file_path) as f:
raw_json_weather_data = json.load(f)
# These are the relevant observations we want to keep
observations = []
header = _get_header()
observations.append(header)
for raw_observation in raw_json_weather_data["history"]["observations"]:
observation = {}
utc_date = _parse_utc_date(raw_observation["utcdate"])
observation["datetime"] = utc_date.isoformat()
observation["temperature"] = _cast_number(raw_observation["tempm"])
observation["dewpoint"] = _cast_number(raw_observation["dewptm"])
observation["windspeed"] = _cast_number(raw_observation["wspdm"], raw_observation["wspdi"])
observation["windgust"] = _cast_number(raw_observation["wgustm"], raw_observation["wgusti"])
observation["winddirection"] = _cast_number(raw_observation["wdird"], raw_observation["wdird"])
observation["pressure"] = _cast_number(raw_observation["pressurem"])
observation["humidity"] = _cast_number(raw_observation["hum"])
if "precip_ratem" in raw_observation:
observation["precipitation"] = _cast_number(raw_observation["precip_ratem"],
raw_observation["precip_ratei"])
else:
observation["precipitation"] = ""
if raw_observation["metar"].startswith("METAR"): # some other record
observation["cloudcover"] = get_cloud_cover(raw_observation["metar"], utc_date)
else:
observation["cloudcover"] = numpy.nan
observations.append(HEADER_FORMAT.format(**observation))
return "\n".join(observations)
def _open_daily_summary(station, day):
"""
:param station: The name of the station, e.g. 'IHAMBURG69'
:param day: The day to get the summary for (can be naive)
:return: The corresponding data frame
"""
csv_file = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, _get_file_name(station, day, "csv"))
data_frame = pandas.read_csv(csv_file, index_col="datetime", parse_dates=["datetime"])
return data_frame
def _create_csv_from_json(station, day, force_overwrite):
"""
:param force_overwrite: Whether to overwrite old daily summary files.
:param station: The name of the station, e.g. 'IHAMBURG69'
:param day: The day to pick the json from
"""
processed_station_dir = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station)
if not os.path.isdir(processed_station_dir):
os.mkdir(processed_station_dir)
csv_path = os.path.join(processed_station_dir, _get_file_name(station, day, 'csv'))
if os.path.isfile(csv_path) and os.path.getsize(csv_path) and not force_overwrite:
logging.info("skip " + csv_path)
return
with open(csv_path, "w") as f:
csv_file_content = _get_data_for_single_day(station, day)
if csv_file_content is not None:
f.write(csv_file_content)
else:
f.write(_get_header())
def join_daily_summaries(station, start_date, end_date, force_overwrite):
"""
:param station:
:param start_date:
:param end_date:
:param force_overwrite:
:return:
"""
date_to_check = start_date
span_summary_file_name = station + "_" + start_date.strftime("%Y%m%d") + "_" + end_date.strftime("%Y%m%d") + ".csv"
output_dir = os.path.join(PROCESSED_DATA_DIR, "station_summaries")
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
span_summary_path = os.path.join(output_dir, span_summary_file_name)
    if os.path.isfile(span_summary_path) and not force_overwrite:
        logging.info("skip " + span_summary_path)
        return
    data_frame = _open_daily_summary(station, start_date)
    # the first day is already loaded, continue with the following one
    date_to_check = start_date + datetime.timedelta(days=1)
    while date_to_check <= end_date:
        data_frame_next = _open_daily_summary(station, date_to_check)
        data_frame = data_frame.append(data_frame_next)
        date_to_check = date_to_check + datetime.timedelta(days=1)
    # remove duplicates (happens if same entry exists for two days)
    data_frame = data_frame.groupby(data_frame.index).first()
    data_frame.sort_index(inplace=True)
data_frame.to_csv(span_summary_path)
def create_daily_summaries_for_time_span(station, start_date, end_date, force_overwrite):
"""
:param force_overwrite: Whether to overwrite old daily summary files.
:param station: The name of the station, e.g. 'IHAMBURG69'
:param start_date: The date to start (included)
:param end_date: The date to stop (included)
:return:
"""
date_to_check = start_date
while date_to_check <= end_date:
_create_csv_from_json(station, date_to_check, force_overwrite)
date_to_check = date_to_check + datetime.timedelta(days=1)
def demo():
stations = ["EDDH"]
for station in stations:
logging.info(station)
start_date = datetime.datetime(2016, 1, 1)
end_date = datetime.datetime(2016, 12, 31)
logging.info("create daily summaries")
create_daily_summaries_for_time_span(station, start_date, end_date, False)
logging.info("create time span summary")
join_daily_summaries(station, start_date, end_date, True)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
demo()
|
agpl-3.0
|
jpbrucker/mbed
|
workspace_tools/host_tests/default_auto.py
|
101
|
1236
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sys import stdout
class DefaultAuto():
""" Simple, basic host test's test runner waiting for serial port
output from MUT, no supervision over test running in MUT is executed.
"""
def test(self, selftest):
result = selftest.RESULT_SUCCESS
try:
while True:
c = selftest.mbed.serial_read(512)
if c is None:
return selftest.RESULT_IO_SERIAL
stdout.write(c)
stdout.flush()
except KeyboardInterrupt, _:
selftest.notify("\r\n[CTRL+C] exit")
result = selftest.RESULT_ERROR
return result
|
apache-2.0
|
vitay/ANNarchy
|
ANNarchy/core/PopulationView.py
|
2
|
12538
|
#===============================================================================
#
# PopulationView
#
# This file is part of ANNarchy.
#
# Copyright (C) 2013-2016 Julien Vitay <[email protected]>,
# Helge Uelo Dinkelbach <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
from ANNarchy.core import Global as Global
from .Random import RandomDistribution
import numpy as np
class PopulationView(object):
""" Container representing a subset of neurons of a Population."""
def __init__(self, population, ranks, geometry=None):
"""
Create a view of a subset of neurons within the same population.
:param population: population object
:param ranks: list or numpy array containing the ranks of the selected neurons.
:param geometry: a geometry for the Populationview (optional)
"""
self.population = population
self.ranks = ranks
self.geometry = geometry
self.size = len(self.ranks)
# For people using Individual neuron
if self.size == 1:
self.rank = self.ranks[0]
else:
self.rank = self.ranks
self.neuron_type = self.population.neuron_type
self.id = self.population.id
self.name = population.name
self.cyInstance = population.cyInstance
self.variables = population.variables
self.attributes = population.attributes
self.max_delay = population.max_delay
def _copy(self):
"Returns a copy of the population when creating networks. Internal use only."
return PopulationView(population=self.population, ranks=self.ranks, geometry=self.geometry)
################################
# Indexing
################################
def __len__(self):
"""
Number of neurons in the population view.
"""
return self.size
def rank_from_coordinates(self, coord, local=False):
"""
Returns the rank of a neuron based on coordinates.
When local is False (default), the coordinates are relative to the ORIGINAL population, not the PopulationView.
When local is True, the coordinates are interpreted relative to the geometry of the PopulationView if available. When you add two population views, the geometry is lost and the method will return an error.
The rank is relative to the original population. Iterate over len(pop) otherwise.
:param coord: coordinate tuple, can be multidimensional.
        :param local: whether the coordinates are local to the PopulationView or not (default: False).
"""
if not local:
rk = self.population.rank_from_coordinates(coord)
if not rk in self.ranks:
Global._error("There is no neuron of coordinates", coord, "in the PopulationView.")
return rk
else:
if not self.geometry:
Global._error("The population view does not have a geometry, cannot use local coordinates.")
else:
try:
intern_rank = np.ravel_multi_index(coord, self.geometry)
except:
Global._error("There is no neuron of coordinates", coord, "in a PopulationView of geometry", self.geometry)
return self.ranks[intern_rank]
def coordinates_from_rank(self, rank, local=False):
"""
Returns the coordinates of a neuron based on its rank.
When local is False (default), the coordinates are relative to the ORIGINAL population, not the PopulationView.
When local is True, the coordinates are interpreted relative to the geometry of the PopulationView if available. When you add two population views, the geometry is lost and the method will return an error.
The rank is relative to the original population. Iterate over len(pop) otherwise.
:param rank: rank of the neuron in the original population
:param local: whether the coordinates are local to the PopulationView or not (default: False).
"""
if not local:
return self.population.coordinates_from_rank(rank)
else:
if not self.geometry:
Global._error("The population view does not have a geometry, cannot use local coordinates.")
else:
if not rank in self.ranks:
Global._error("There is no neuron of rank", rank, "in the PopulationView.")
intern_rk = self.ranks.index(rank)
coord = np.unravel_index(intern_rk, self.geometry)
return coord
################################
# Targets must match the population, both in read and write
################################
@property
def targets(self):
"List of targets connected to the population."
return self.population.targets
@targets.setter
def targets(self, value):
self.population.targets.append(value)
################################
## Access to attributes
################################
def __getattr__(self, name):
" Method called when accessing an attribute."
if name == 'population':
return object.__getattribute__(self, name)
elif hasattr(self.population, 'attributes'):
if name in self.population.attributes:
return self.get(name)
else:
return object.__getattribute__(self, name)
else:
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
" Method called when setting an attribute."
if name == 'population':
object.__setattr__(self, name, value)
elif hasattr(self, 'population'):
if name in self.population.attributes:
self.set({name: value})
else:
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, value)
def get(self, name):
"""
Returns current variable/parameter value.
:param name: name of the parameter/variable.
"""
if name in self.population.attributes:
all_val = getattr(self.population, name).reshape(self.population.size)
return all_val[self.ranks]
else:
Global._error("Population does not have a parameter/variable called " + name + ".")
def set(self, value):
"""
Updates the neurons' variable/parameter values.
:param value: dictionary of parameters/variables to be updated for the corresponding subset of neurons. It can be a single value or a list/1D array of the same size as the PopulationView.
.. code-block:: python
>>> subpop = pop[0:5]
>>> subpop.set( {'tau' : 20, 'r'= np.random.rand(subpop.size) } )
.. warning::
If you modify the value of a global parameter, this will be the case for ALL neurons of the population, not only the subset.
"""
def _set_single(name, rank, value):
if not self.population.initialized:
if not name in self.population.neuron_type.description['local']:
Global._error('can not set the value of a global attribute from a PopulationView.')
return
if isinstance(self.population.init[name], np.ndarray):
if len(self.population.geometry) == 1:
self.population.init[name][rank] = value
else: # Need to access the coordinates
coords = self.population.coordinates_from_rank(rank)
self.population.init[name][coords] = value
else:
val = self.population.init[name]
data = val * np.ones(self.population.size)
data[rank] = value
self.population.init[name] = data.reshape(self.population.geometry)
else:
getattr(self.population.cyInstance, 'set_single_'+name)(rank, value)
for val_key in value.keys():
if hasattr(self.population, val_key):
# Check the value
if isinstance(value[val_key], RandomDistribution): # Make sure it is generated only once
value[val_key] = np.array(value[val_key].get_values(self.size))
if isinstance(value[val_key], np.ndarray): # np.array
if value[val_key].ndim >1 or len(value[val_key]) != self.size:
Global._error("You can only provide an array of the same size as the PopulationView", self.size)
return None
if val_key in self.population.neuron_type.description['global']:
Global._error("Global attributes can only have one value in a population.")
return None
# Assign the value
for idx, rk in enumerate(self.ranks):
_set_single(val_key, rk, value[val_key][idx])
elif isinstance(value[val_key], list): # list
if len(value[val_key]) != self.size:
Global._error("You can only provide a list of the same size as the PopulationView", self.size)
return None
if val_key in self.population.neuron_type.description['global']:
Global._error("Global attributes can only have one value in a population.")
return None
# Assign the value
for idx, rk in enumerate(self.ranks):
_set_single(val_key, rk, value[val_key][idx])
else: # single value
for rk in self.ranks:
_set_single(val_key, rk, value[val_key])
else:
Global._error("the population has no attribute called ", val_key)
return None
################################
## Access to weighted sums
################################
def sum(self, target):
"""
Returns the array of weighted sums corresponding to the target::
excitatory = pop.sum('exc')
For spiking networks, this is equivalent to accessing the conductances directly::
excitatory = pop.g_exc
If no incoming projection has the given target, the method returns zeros.
:param target: the desired projection target.
**Note:** it is not possible to distinguish the original population when the same target is used.
"""
return self.population.sum(target)[self.ranks]
################################
## Composition
################################
def __add__(self, other):
"""Allows to join two PopulationViews if they have the same population."""
from ANNarchy.core.Neuron import IndividualNeuron
if other.population == self.population:
if isinstance(other, IndividualNeuron):
return PopulationView(self.population, list(set(self.ranks + [other.rank])))
elif isinstance(other, PopulationView):
return PopulationView(self.population, list(set(self.ranks + other.ranks)))
else:
Global._error("can only add two PopulationViews of the same population.")
def __repr__(self):
"""Defines the printing behaviour."""
string ="PopulationView of " + str(self.population.name) + '\n'
string += ' Ranks: ' + str(self.ranks)
string += '\n'
for rk in self.ranks:
string += '* ' + str(self.population.neuron(rk)) + '\n'
return string
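# Illustrative sketch (not part of ANNarchy): rank_from_coordinates() and
# coordinates_from_rank() with local=True rely on numpy's ravel_multi_index /
# unravel_index over the view's geometry. The check below uses a made-up 4x5
# geometry to show that the two mappings are inverses of each other, without
# constructing an actual Population.
def _demo_rank_coordinate_mapping():
    geometry = (4, 5)
    assert np.ravel_multi_index((1, 2), geometry) == 7
    assert np.unravel_index(7, geometry) == (1, 2)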
|
gpl-2.0
|
EvanK/ansible
|
lib/ansible/modules/remote_management/manageiq/manageiq_user.py
|
64
|
10097
|
#!/usr/bin/python
#
# (c) 2017, Daniel Korn <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_user
short_description: Management of users in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.4'
author: Daniel Korn (@dkorn)
description:
- The manageiq_user module supports adding, updating and deleting users in ManageIQ.
options:
state:
description:
- absent - user should not exist, present - user should be.
choices: ['absent', 'present']
default: 'present'
userid:
description:
- The unique userid in manageiq, often mentioned as username.
required: true
name:
description:
- The users' full name.
password:
description:
- The users' password.
group:
description:
- The name of the group to which the user belongs.
email:
description:
- The users' E-mail address.
update_password:
default: always
choices: ['always', 'on_create']
description:
- C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
version_added: '2.5'
'''
EXAMPLES = '''
- name: Create a new user in ManageIQ
manageiq_user:
userid: 'jdoe'
name: 'Jane Doe'
password: 'VerySecret'
group: 'EvmGroup-user'
email: '[email protected]'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Create a new user in ManageIQ using a token
manageiq_user:
userid: 'jdoe'
name: 'Jane Doe'
password: 'VerySecret'
group: 'EvmGroup-user'
email: '[email protected]'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
verify_ssl: False
- name: Delete a user in ManageIQ
manageiq_user:
state: 'absent'
userid: 'jdoe'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Delete a user in ManageIQ using a token
manageiq_user:
state: 'absent'
userid: 'jdoe'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
verify_ssl: False
- name: Update email of user in ManageIQ
manageiq_user:
userid: 'jdoe'
email: '[email protected]'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Update email of user in ManageIQ using a token
manageiq_user:
userid: 'jdoe'
email: '[email protected]'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
verify_ssl: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQUser(object):
"""
Object to execute user management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
def group_id(self, description):
""" Search for group id by group description.
Returns:
the group id, or send a module Fail signal if group not found.
"""
group = self.manageiq.find_collection_resource_by('groups', description=description)
if not group: # group doesn't exist
self.module.fail_json(
msg="group %s does not exist in manageiq" % (description))
return group['id']
def user(self, userid):
""" Search for user object by userid.
Returns:
the user, or None if user not found.
"""
return self.manageiq.find_collection_resource_by('users', userid=userid)
def compare_user(self, user, name, group_id, password, email):
""" Compare user fields with new field values.
Returns:
false if user fields have some difference from new fields, true o/w.
"""
found_difference = (
(name and user['name'] != name) or
(password is not None) or
(email and user['email'] != email) or
(group_id and user['current_group_id'] != group_id)
)
return not found_difference
def delete_user(self, user):
""" Deletes a user from manageiq.
Returns:
a short message describing the operation executed.
"""
try:
url = '%s/users/%s' % (self.api_url, user['id'])
result = self.client.post(url, action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
return dict(changed=True, msg=result['message'])
def edit_user(self, user, name, group, password, email):
""" Edit a user from manageiq.
Returns:
a short message describing the operation executed.
"""
group_id = None
url = '%s/users/%s' % (self.api_url, user['id'])
resource = dict(userid=user['userid'])
if group is not None:
group_id = self.group_id(group)
resource['group'] = dict(id=group_id)
if name is not None:
resource['name'] = name
if email is not None:
resource['email'] = email
# if there is a password param, but 'update_password' is 'on_create'
# then discard the password (since we're editing an existing user)
if self.module.params['update_password'] == 'on_create':
password = None
if password is not None:
resource['password'] = password
        # check if we need to update (compare_user returns True if no difference is found)
if self.compare_user(user, name, group_id, password, email):
return dict(
changed=False,
msg="user %s is not changed." % (user['userid']))
# try to update user
try:
result = self.client.post(url, action='edit', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
return dict(
changed=True,
msg="successfully updated the user %s: %s" % (user['userid'], result))
def create_user(self, userid, name, group, password, email):
""" Creates the user in manageiq.
Returns:
the created user id, name, created_on timestamp,
updated_on timestamp, userid and current_group_id.
"""
# check for required arguments
for key, value in dict(name=name, group=group, password=password).items():
if value in (None, ''):
self.module.fail_json(msg="missing required argument: %s" % (key))
group_id = self.group_id(group)
url = '%s/users' % (self.api_url)
resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
if email is not None:
resource['email'] = email
# try to create a new user
try:
result = self.client.post(url, action='create', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))
return dict(
changed=True,
msg="successfully created the user %s: %s" % (userid, result['results']))
def main():
argument_spec = dict(
userid=dict(required=True, type='str'),
name=dict(),
password=dict(no_log=True),
group=dict(),
email=dict(),
state=dict(choices=['absent', 'present'], default='present'),
update_password=dict(choices=['always', 'on_create'],
default='always'),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(
argument_spec=argument_spec,
)
userid = module.params['userid']
name = module.params['name']
password = module.params['password']
group = module.params['group']
email = module.params['email']
state = module.params['state']
manageiq = ManageIQ(module)
manageiq_user = ManageIQUser(manageiq)
user = manageiq_user.user(userid)
# user should not exist
if state == "absent":
# if we have a user, delete it
if user:
res_args = manageiq_user.delete_user(user)
# if we do not have a user, nothing to do
else:
res_args = dict(
changed=False,
msg="user %s: does not exist in manageiq" % (userid))
    # user should exist
if state == "present":
# if we have a user, edit it
if user:
res_args = manageiq_user.edit_user(user, name, group, password, email)
# if we do not have a user, create it
else:
res_args = manageiq_user.create_user(userid, name, group, password, email)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
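# Illustrative sketch (not part of the Ansible module, never called by it):
# compare_user() only inspects the passed-in user dict and the new field
# values, so its idempotency check can be exercised with a hand-built record.
# The name, e-mail and group id below are made up for illustration.
def _demo_compare_user():
    miq_user = ManageIQUser.__new__(ManageIQUser)  # skip __init__, no ManageIQ connection needed
    user = {"name": "Jane Doe", "email": "jdoe@example.com", "current_group_id": 42}
    # identical values and no new password -> nothing to update
    assert miq_user.compare_user(user, "Jane Doe", 42, None, "jdoe@example.com") is True
    # supplying a password always counts as a difference
    assert miq_user.compare_user(user, "Jane Doe", 42, "NewSecret", "jdoe@example.com") is False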
|
gpl-3.0
|
diogommartins/pox
|
pox/messenger/web_transport.py
|
40
|
8822
|
# Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Connects the POX messenger bus to HTTP.
Requires the "webserver" component.
NOTE: The web_transport keeps its own session IDs. Since it was first
      written, though, session IDs have become part of every
Connection, and we could (but are not) reuse those.
"""
from SocketServer import ThreadingMixIn
from BaseHTTPServer import *
import time
import select
import random
import hashlib
import base64
import json
from pox.lib.recoco import Timer
from pox.messenger import Connection, Transport
from pox.core import core
from pox.web.webcore import *
log = core.getLogger()
class HTTPConnection (Connection):
def __init__ (self, transport):
Connection.__init__(self, transport)
self._messages = []
self._cond = threading.Condition()
self._quitting = False
# We're really protected from attack by the session key, we hope
self._tx_seq = -1 #random.randint(0, 1 << 32)
self._rx_seq = None
#self._t = Timer(10, lambda : self.send({'hi':'again'}), recurring=True)
self._touched = time.time()
self._send_welcome()
def _check_timeout (self):
if (time.time() - self._touched) > 120:
log.info("Session " + str(self) + " timed out")
self._close()
def _new_tx_seq (self):
self._tx_seq = (self._tx_seq + 1) & 0x7fFFffFF
return self._tx_seq
def _check_rx_seq (self, seq):
seq = int(seq)
if self._rx_seq is None: self._rx_seq = seq
if seq != self._rx_seq: return False
self._rx_seq = (self._rx_seq + 1) & 0x7fFFffFF
return True
def _close (self):
super(HTTPConnection, self)._close()
#TODO: track request sockets and cancel them?
self._quitting = True
def send_raw (self, data):
self._cond.acquire()
self._messages.append(data)
self._cond.notify()
self._cond.release()
def _do_rx_message (self, items):
for item in items:
self._rx_message(item)
class HTTPTransport (Transport):
def __init__ (self, nexus = None):
Transport.__init__(self, nexus)
self._connections = {}
#self._t = Timer(5, self._check_timeouts, recurring=True)
self._t = Timer(60*2, self._check_timeouts, recurring=True)
def _check_timeouts (self):
for c in self._connections.values():
c._check_timeout()
def _forget (self, connection):
# From MessengerTransport
if connection._session_id in self._connections:
del self._connections[connection._session_id]
else:
#print "Failed to forget", connection
pass
def create_session (self):
ses = HTTPConnection(self)
self._connections[ses._session_id] = ses
return ses
def get_session (self, key):
return self._connections.get(key, None)
class CometRequestHandler (SplitRequestHandler):
protocol_version = 'HTTP/1.1'
# def __init__ (self, *args, **kw):
# super(CometRequestHandler, self).__init__(*args, **kw)
def _init (self):
self.transport = self.args['transport']
self.auth_function = self.args.get('auth', None)
def _doAuth (self):
if self.auth_function:
auth = self.headers.get("Authorization", "").strip().lower()
success = False
if auth.startswith("basic "):
try:
auth = base64.decodestring(auth[6:].strip()).split(':', 1)
success = self.auth_function(auth[0], auth[1])
except:
pass
if success is not True:
self.send_response(401, "Authorization Required")
self.send_header("WWW-Authenticate", 'Basic realm="POX"')
self.end_headers()
return
def _getSession (self):
session_key = self.headers.get("X-POX-Messenger-Session-Key")
if session_key is None:
session_key = self.path.split('/')[-1]
session_key = session_key.strip()
if len(session_key) == 0:
#TODO: return some bad response and log
return None
if session_key == "new":
hmh = self.transport.create_session()
else:
hmh = self.transport.get_session(session_key)
#print session_key, hmh.session_key
return hmh
def _enter (self):
self._doAuth()
hmh = self._getSession()
if hmh is None:
#TODO: return some bad response and log
pass
else:
hmh._touched = time.time()
return hmh
def do_POST (self):
hmh = self._enter()
if hmh is None: return None
l = self.headers.get("Content-Length", "")
if l == "":
data = json.loads(self.rfile.read())
else:
data = json.loads(self.rfile.read(int(l)))
payload = data['data']
# We send null payload for timeout poking and initial setup
if 'seq' in data:
if not hmh._check_rx_seq(data['seq']):
# Bad seq!
data = '{"seq":-1,"ses":"%s"}' % (hmh._session_id,)
self.send_response(400, "Bad sequence number")
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", len(data))
self.send_header("X-POX-Messenger-Sequence-Number", "-1")
if self.auth_function: self.send_header("WWW-Authenticate",
'Basic realm="POX"')
self.end_headers()
self.wfile.write(data)
hmh._close()
return
if payload is not None:
core.callLater(hmh._do_rx_message, payload)
try:
data = '{"seq":-1,"ses":"%s"}' % (hmh._session_id,)
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", len(data))
self.send_header("X-POX-Messenger-Sequence-Number", "-1")
if self.auth_function: self.send_header("WWW-Authenticate",
'Basic realm="POX"')
self.end_headers()
self.wfile.write(data)
except:
import traceback
traceback.print_exc()
pass
return
def do_GET (self):
hmh = self._enter()
if hmh is None: return None
hmh._cond.acquire()
if len(hmh._messages) == 0:
# Wait for messages
while True:
# Every couple seconds check if the socket is dead
hmh._cond.wait(2)
if len(hmh._messages): break
if hmh._quitting: break
r,w,x = select.select([self.wfile],[],[self.wfile], 0)
if len(r) or len(x):
# Other side disconnected?
hmh._cond.release()
return
# Okay...
if hmh._quitting:
#NOTE: we don't drain the messages first, but maybe we should?
try:
data = '{"seq":-1,"ses":"%s"}' % (hmh._session_id,)
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", len(data))
self.send_header("X-POX-Messenger-Sequence-Number", "-1")
if self.auth_function: self.send_header("WWW-Authenticate",
'Basic realm="POX"')
self.end_headers()
self.wfile.write(data)
except:
pass
hmh._cond.release()
return
num_messages = min(20, len(hmh._messages))
data = hmh._messages[:num_messages]
old_seq = hmh._tx_seq
seq = hmh._new_tx_seq()
data = '{"seq":%i,"ses":"%s","data":[%s]}' % (seq, hmh._session_id,
','.join(data))
try:
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", len(data))
self.send_header("X-POX-Messenger-Sequence-Number", str(seq))
if self.auth_function: self.send_header("WWW-Authenticate",
'Basic realm="POX"')
self.end_headers()
self.wfile.write(data)
del hmh._messages[:num_messages]
except:
hmh._tx_seq = old_seq
hmh._cond.release()
def launch (username='', password=''):
def _launch ():
transport = core.registerNew(HTTPTransport)
# Set up config info
config = {"transport":transport}
if len(username) and len(password):
config['auth'] = lambda u, p: (u == username) and (p == password)
core.WebServer.set_handler("/_webmsg/",CometRequestHandler,config,True)
core.call_when_ready(_launch, ["WebServer","MessengerNexus"],
name = "webmessenger")
|
apache-2.0
|
adrienbrault/home-assistant
|
homeassistant/components/elv/switch.py
|
5
|
2665
|
"""Support for PCA 301 smart switch."""
import logging
import pypca
from serial import SerialException
from homeassistant.components.switch import ATTR_CURRENT_POWER_W, SwitchEntity
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
ATTR_TOTAL_ENERGY_KWH = "total_energy_kwh"
DEFAULT_NAME = "PCA 301"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PCA switch platform."""
if discovery_info is None:
return
serial_device = discovery_info["device"]
try:
pca = pypca.PCA(serial_device)
pca.open()
entities = [SmartPlugSwitch(pca, device) for device in pca.get_devices()]
add_entities(entities, True)
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, pca.close)
pca.start_scan()
class SmartPlugSwitch(SwitchEntity):
"""Representation of a PCA Smart Plug switch."""
def __init__(self, pca, device_id):
"""Initialize the switch."""
self._device_id = device_id
self._name = "PCA 301"
self._state = None
self._available = True
self._emeter_params = {}
self._pca = pca
@property
def name(self):
"""Return the name of the Smart Plug, if any."""
return self._name
@property
def available(self) -> bool:
"""Return if switch is available."""
return self._available
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._pca.turn_on(self._device_id)
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._pca.turn_off(self._device_id)
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def update(self):
"""Update the PCA switch's state."""
try:
self._emeter_params[
ATTR_CURRENT_POWER_W
] = f"{self._pca.get_current_power(self._device_id):.1f}"
self._emeter_params[
ATTR_TOTAL_ENERGY_KWH
] = f"{self._pca.get_total_consumption(self._device_id):.2f}"
self._available = True
self._state = self._pca.get_state(self._device_id)
except (OSError) as ex:
if self._available:
_LOGGER.warning("Could not read state for %s: %s", self.name, ex)
self._available = False
|
mit
|
superdesk/superdesk-aap
|
server/aap/macros/remove_place_with_no_qcode_test.py
|
3
|
1704
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.tests import TestCase
from .remove_place_with_no_qcode import remove_place_with_no_qcode
class RemovePlaceTests(TestCase):
def simple_case_test(self):
item = {
"_id": "tag:localhost:2017:77b03a97-df04-446e-a112-94941f1bb12c",
"place": [
{
"name": "United States"
}
]
}
remove_place_with_no_qcode(item)
self.assertEqual(item['place'], [])
def multiple_case_test(self):
item = {
"_id": "tag:localhost:2017:77b03a97-df04-446e-a112-94941f1bb12c",
"place": [
{
"name": "United States"
},
{
"name": "Wagga Wagga"
}
]
}
remove_place_with_no_qcode(item)
self.assertEqual(item['place'], [])
def keep_one_case_test(self):
item = {
"_id": "tag:localhost:2017:77b03a97-df04-446e-a112-94941f1bb12c",
"place": [
{
"name": "United States"
},
{
"name": "Wagga Wagga",
"qcode": '1'
}
]
}
remove_place_with_no_qcode(item)
self.assertEqual(item['place'], [{"name": "Wagga Wagga", "qcode": '1'}])
|
agpl-3.0
|
hyunokoh/s4_qemu
|
scripts/vmstate-static-checker.py
|
51
|
15334
|
#!/usr/bin/python
#
# Compares vmstate information stored in JSON format, obtained from
# the -dump-vmstate QEMU command.
#
# Copyright 2014 Amit Shah <[email protected]>
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
# Count the number of errors found
taint = 0
def bump_taint():
global taint
# Ensure we don't wrap around or reset to 0 -- the shell only has
# an 8-bit return value.
if taint < 255:
taint = taint + 1
def check_fields_match(name, s_field, d_field):
if s_field == d_field:
return True
# Some fields changed names between qemu versions. This list
# is used to whitelist such changes in each section / description.
changed_names = {
'apic': ['timer', 'timer_expiry'],
'e1000': ['dev', 'parent_obj'],
'ehci': ['dev', 'pcidev'],
'I440FX': ['dev', 'parent_obj'],
'ich9_ahci': ['card', 'parent_obj'],
'ich9-ahci': ['ahci', 'ich9_ahci'],
'ioh3420': ['PCIDevice', 'PCIEDevice'],
'ioh-3240-express-root-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'lsiscsi': ['dev', 'parent_obj'],
'mch': ['d', 'parent_obj'],
'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
'pcnet': ['pci_dev', 'parent_obj'],
'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'],
'piix4_pm': ['dev', 'parent_obj', 'pci0_status',
'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]',
'pm1a.sts', 'ar.pm1.evt.sts', 'pm1a.en', 'ar.pm1.evt.en',
'pm1_cnt.cnt', 'ar.pm1.cnt.cnt',
'tmr.timer', 'ar.tmr.timer',
'tmr.overflow_time', 'ar.tmr.overflow_time',
'gpe', 'ar.gpe'],
'rtl8139': ['dev', 'parent_obj'],
'qxl': ['num_surfaces', 'ssd.num_surfaces'],
'usb-ccid': ['abProtocolDataStructure', 'abProtocolDataStructure.data'],
'usb-host': ['dev', 'parent_obj'],
'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'vmware_vga': ['card', 'parent_obj'],
'vmware_vga_internal': ['depth', 'new_depth'],
'xhci': ['pci_dev', 'parent_obj'],
'x3130-upstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-downstream-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'xio3130-downstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj',
'br.dev.exp.aer_log',
'parent_obj.parent_obj.exp.aer_log'],
}
if not name in changed_names:
return False
if s_field in changed_names[name] and d_field in changed_names[name]:
return True
return False
def get_changed_sec_name(sec):
# Section names can change -- see commit 292b1634 for an example.
changes = {
"ICH9 LPC": "ICH9-LPC",
}
for item in changes:
if item == sec:
return changes[item]
if changes[item] == sec:
return item
return ""
def exists_in_substruct(fields, item):
# Some QEMU versions moved a few fields inside a substruct. This
# kept the on-wire format the same. This function checks if
# something got shifted inside a substruct. For example, the
# change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f
if not "Description" in fields:
return False
if not "Fields" in fields["Description"]:
return False
substruct_fields = fields["Description"]["Fields"]
if substruct_fields == []:
return False
return check_fields_match(fields["Description"]["name"],
substruct_fields[0]["field"], item)
def check_fields(src_fields, dest_fields, desc, sec):
# This function checks for all the fields in a section. If some
# fields got embedded into a substruct, this function will also
# attempt to check inside the substruct.
d_iter = iter(dest_fields)
s_iter = iter(src_fields)
# Using these lists as stacks to store previous value of s_iter
# and d_iter, so that when time comes to exit out of a substruct,
# we can go back one level up and continue from where we left off.
s_iter_list = []
d_iter_list = []
advance_src = True
advance_dest = True
unused_count = 0
while True:
if advance_src:
try:
s_item = s_iter.next()
except StopIteration:
if s_iter_list == []:
break
s_iter = s_iter_list.pop()
continue
else:
if unused_count == 0:
# We want to avoid advancing just once -- when entering a
# dest substruct, or when exiting one.
advance_src = True
if advance_dest:
try:
d_item = d_iter.next()
except StopIteration:
if d_iter_list == []:
# We were not in a substruct
print "Section \"" + sec + "\",",
print "Description " + "\"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "while dest has no further fields"
bump_taint()
break
d_iter = d_iter_list.pop()
advance_src = False
continue
else:
if unused_count == 0:
advance_dest = True
if unused_count > 0:
if advance_dest == False:
unused_count = unused_count - s_item["size"]
if unused_count == 0:
advance_dest = True
continue
if unused_count < 0:
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "unused size mismatch near \"",
print s_item["field"] + "\""
bump_taint()
break
continue
if advance_src == False:
unused_count = unused_count - d_item["size"]
if unused_count == 0:
advance_src = True
continue
if unused_count < 0:
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "unused size mismatch near \"",
print d_item["field"] + "\""
bump_taint()
break
continue
if not check_fields_match(desc, s_item["field"], d_item["field"]):
# Some fields were put in substructs, keeping the
# on-wire format the same, but breaking static tools
# like this one.
# First, check if dest has a new substruct.
if exists_in_substruct(d_item, s_item["field"]):
# listiterators don't have a prev() function, so we
# have to store our current location, descend into the
# substruct, and ensure we come out as if nothing
# happened when the substruct is over.
#
# Essentially we're opening the substructs that got
# added which didn't change the wire format.
d_iter_list.append(d_iter)
substruct_fields = d_item["Description"]["Fields"]
d_iter = iter(substruct_fields)
advance_src = False
continue
# Next, check if src has substruct that dest removed
# (can happen in backward migration: 2.0 -> 1.5)
if exists_in_substruct(s_item, d_item["field"]):
s_iter_list.append(s_iter)
substruct_fields = s_item["Description"]["Fields"]
s_iter = iter(substruct_fields)
advance_dest = False
continue
if s_item["field"] == "unused" or d_item["field"] == "unused":
if s_item["size"] == d_item["size"]:
continue
if d_item["field"] == "unused":
advance_dest = False
unused_count = d_item["size"] - s_item["size"]
continue
if s_item["field"] == "unused":
advance_src = False
unused_count = s_item["size"] - d_item["size"]
continue
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "got \"" + d_item["field"] + "\"; skipping rest"
bump_taint()
break
check_version(s_item, d_item, sec, desc)
if not "Description" in s_item:
# Check size of this field only if it's not a VMSTRUCT entry
check_size(s_item, d_item, sec, desc, s_item["field"])
check_description_in_list(s_item, d_item, sec, desc)
def check_subsections(src_sub, dest_sub, desc, sec):
for s_item in src_sub:
found = False
for d_item in dest_sub:
if s_item["name"] != d_item["name"]:
continue
found = True
check_descriptions(s_item, d_item, sec)
if not found:
print "Section \"" + sec + "\", Description \"" + desc + "\":",
print "Subsection \"" + s_item["name"] + "\" not found"
bump_taint()
def check_description_in_list(s_item, d_item, sec, desc):
if not "Description" in s_item:
return
if not "Description" in d_item:
print "Section \"" + sec + "\", Description \"" + desc + "\",",
print "Field \"" + s_item["field"] + "\": missing description"
bump_taint()
return
check_descriptions(s_item["Description"], d_item["Description"], sec)
def check_descriptions(src_desc, dest_desc, sec):
check_version(src_desc, dest_desc, sec, src_desc["name"])
if not check_fields_match(sec, src_desc["name"], dest_desc["name"]):
print "Section \"" + sec + "\":",
print "Description \"" + src_desc["name"] + "\"",
print "missing, got \"" + dest_desc["name"] + "\" instead; skipping"
bump_taint()
return
for f in src_desc:
if not f in dest_desc:
print "Section \"" + sec + "\"",
print "Description \"" + src_desc["name"] + "\":",
print "Entry \"" + f + "\" missing"
bump_taint()
continue
if f == 'Fields':
check_fields(src_desc[f], dest_desc[f], src_desc["name"], sec)
if f == 'Subsections':
check_subsections(src_desc[f], dest_desc[f], src_desc["name"], sec)
def check_version(s, d, sec, desc=None):
if s["version_id"] > d["version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "version error:", s["version_id"], ">", d["version_id"]
bump_taint()
if not "minimum_version_id" in d:
return
if s["version_id"] < d["minimum_version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "minimum version error:", s["version_id"], "<",
print d["minimum_version_id"]
bump_taint()
def check_size(s, d, sec, desc=None, field=None):
if s["size"] != d["size"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\"",
if field:
print "Field \"" + field + "\"",
print "size mismatch:", s["size"], ",", d["size"]
bump_taint()
def check_machine_type(s, d):
if s["Name"] != d["Name"]:
print "Warning: checking incompatible machine types:",
print "\"" + s["Name"] + "\", \"" + d["Name"] + "\""
return
def main():
help_text = "Parse JSON-formatted vmstate dumps from QEMU in files SRC and DEST. Checks whether migration from SRC to DEST QEMU versions would break based on the VMSTATE information contained within the JSON outputs. The JSON output is created from a QEMU invocation with the -dump-vmstate parameter and a filename argument to it. Other parameters to QEMU do not matter, except the -M (machine type) parameter."
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument('-s', '--src', type=file, required=True,
help='json dump from src qemu')
parser.add_argument('-d', '--dest', type=file, required=True,
help='json dump from dest qemu')
parser.add_argument('--reverse', required=False, default=False,
action='store_true',
help='reverse the direction')
args = parser.parse_args()
src_data = json.load(args.src)
dest_data = json.load(args.dest)
args.src.close()
args.dest.close()
if args.reverse:
temp = src_data
src_data = dest_data
dest_data = temp
for sec in src_data:
dest_sec = sec
if not dest_sec in dest_data:
# Either the section name got changed, or the section
# doesn't exist in dest.
dest_sec = get_changed_sec_name(sec)
if not dest_sec in dest_data:
print "Section \"" + sec + "\" does not exist in dest"
bump_taint()
continue
s = src_data[sec]
d = dest_data[dest_sec]
if sec == "vmschkmachine":
check_machine_type(s, d)
continue
check_version(s, d, sec)
for entry in s:
if not entry in d:
print "Section \"" + sec + "\": Entry \"" + entry + "\"",
print "missing"
bump_taint()
continue
if entry == "Description":
check_descriptions(s[entry], d[entry], sec)
return taint
if __name__ == '__main__':
sys.exit(main())
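# Usage sketch (not part of the original script; the binary name, machine
# types and file names below are illustrative assumptions):
#
#   qemu-system-x86_64 -M pc-i440fx-2.0 -dump-vmstate src.json
#   qemu-system-x86_64 -M pc-i440fx-2.1 -dump-vmstate dest.json
#   ./vmstate-static-checker.py -s src.json -d dest.json
#
# The exit status is the number of incompatibilities found, capped at 255
# by bump_taint() above; 0 means no migration-breaking difference was seen.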
|
gpl-2.0
|
sdphome/UHF_Reader
|
rfs/rootfs/usr/lib/python2.7/encodings/mac_latin2.py
|
647
|
8565
|
""" Python Character Mapping Codec generated from 'LATIN2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-latin2',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x0082: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x0089: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x008c: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x008d: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x0090: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x0091: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x0094: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x0095: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x0096: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x009e: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x2020, # DAGGER
0x00a1: 0x00b0, # DEGREE SIGN
0x00a2: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a4: 0x00a7, # SECTION SIGN
0x00a5: 0x2022, # BULLET
0x00a6: 0x00b6, # PILCROW SIGN
0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x2122, # TRADE MARK SIGN
0x00ab: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ac: 0x00a8, # DIAERESIS
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x00af: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00b0: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00b1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x00b5: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
0x00b7: 0x2211, # N-ARY SUMMATION
0x00b8: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00b9: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00ba: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00bb: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x00bc: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x00bd: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x00be: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x00bf: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00c0: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00c1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x221a, # SQUARE ROOT
0x00c4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00c5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00c6: 0x2206, # INCREMENT
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00cc: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ce: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x00cf: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2014, # EM DASH
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x25ca, # LOZENGE
0x00d8: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x00d9: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00da: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00db: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00dc: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x00dd: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x00de: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00df: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x00e0: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x00e1: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00e4: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e5: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x00e6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e8: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x00e9: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00eb: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00ec: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00ed: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00f0: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00f1: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00f3: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x00f4: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00f5: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00f6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00f7: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00f8: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00f9: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00fa: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00fb: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00fc: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00fd: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00fe: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x00ff: 0x02c7, # CARON
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
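### Usage sketch (not part of the original module): round-trip a few of the
### characters mapped above through this codec. The codec is resolved by
### name through the standard "encodings" alias machinery, so this assumes
### the module is installed under encodings/ as usual.
if __name__ == '__main__':
    sample = u'\u0104\u010d\u0151'  # A with ogonek, c with caron, o with double acute
    encoded = sample.encode('mac-latin2')
    assert encoded.decode('mac-latin2') == sample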
|
gpl-3.0
|
rickerc/glance_audit
|
glance/cmd/manage.py
|
3
|
4395
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Management Utility
"""
# FIXME(sirp): When we have glance-admin we can consider merging this into it
# Perhaps for consistency with Nova, we would then rename glance-admin ->
# glance-manage (or the other way around)
import os
import sys
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
from glance.common import config
from glance.common import exception
import glance.db.sqlalchemy.api
import glance.db.sqlalchemy.migration
from glance.openstack.common import log
CONF = cfg.CONF
def do_db_version():
"""Print database's current migration level"""
print glance.db.sqlalchemy.migration.db_version()
def do_upgrade():
"""Upgrade the database's migration level"""
glance.db.sqlalchemy.migration.upgrade(CONF.command.version)
def do_downgrade():
"""Downgrade the database's migration level"""
glance.db.sqlalchemy.migration.downgrade(CONF.command.version)
def do_version_control():
"""Place a database under migration control"""
glance.db.sqlalchemy.migration.version_control(CONF.command.version)
def do_db_sync():
"""
Place a database under migration control and upgrade,
creating first if necessary.
"""
glance.db.sqlalchemy.migration.db_sync(CONF.command.version,
CONF.command.current_version)
def add_command_parsers(subparsers):
parser = subparsers.add_parser('db_version')
parser.set_defaults(func=do_db_version)
parser = subparsers.add_parser('upgrade')
parser.set_defaults(func=do_upgrade)
parser.add_argument('version', nargs='?')
parser = subparsers.add_parser('downgrade')
parser.set_defaults(func=do_downgrade)
parser.add_argument('version')
parser = subparsers.add_parser('version_control')
parser.set_defaults(func=do_version_control)
parser.add_argument('version', nargs='?')
parser = subparsers.add_parser('db_sync')
parser.set_defaults(func=do_db_sync)
parser.add_argument('version', nargs='?')
parser.add_argument('current_version', nargs='?')
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Available commands',
handler=add_command_parsers)
def main():
CONF.register_cli_opt(command_opt)
try:
# We load the glance-registry config section because
# sql_connection is only part of the glance registry.
glance.db.sqlalchemy.api.add_cli_options()
cfg_files = cfg.find_config_files(project='glance',
prog='glance-registry')
if not cfg_files:
cfg_files = cfg.find_config_files(project='glance',
prog='glance-api')
config.parse_args(default_config_files=cfg_files,
usage="%(prog)s [options] <cmd>")
log.setup('glance')
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
try:
CONF.command.func()
except exception.GlanceException as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
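# Typical invocations (a sketch, not part of the original file; the installed
# console script that wraps this main() is normally named "glance-manage"):
#
#   glance-manage db_version        # print the current migration level
#   glance-manage db_sync           # create the schema and/or upgrade it
#   glance-manage upgrade 20        # upgrade to a specific version
#   glance-manage version_control   # place an existing DB under migration control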
|
apache-2.0
|
peterayeni/libforensics
|
code/lf/win/shell/recyclebin/dtypes.py
|
13
|
1410
|
# Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Data structures to read recycle bin INFO2 files."""
# local imports
from lf.dtypes import raw, LERecord
from lf.win.dtypes import DWORD, FILETIME_LE
__docformat__ = "restructuredtext en"
__all__ = [
"INFO2Header", "INFO2Item"
]
class INFO2Header(LERecord):
version = DWORD
unknown1 = DWORD
unknown2 = DWORD # Number of entries in file?
item_size = DWORD
unknown3 = DWORD # Varies per file, a timestamp?
# end class Header
class INFO2Item(LERecord):
name_asc = raw(260)
id = DWORD # DcXX (this is the XX)
drive_num = DWORD # 0 = A, 1 = B, 2 = C, ...
dtime = FILETIME_LE
file_size = DWORD
name_uni = raw(520)
# end class Item
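# Layout sketch derived from the declarations above (all little-endian):
#
#   offset   0: name_asc   260 bytes  (ANSI path, NUL padded)
#   offset 260: id           4 bytes  (the XX in DcXX)
#   offset 264: drive_num    4 bytes  (0 = A:, 1 = B:, 2 = C:, ...)
#   offset 268: dtime        8 bytes  (FILETIME, time of deletion)
#   offset 276: file_size    4 bytes
#   offset 280: name_uni   520 bytes  (UTF-16-LE path)
#
# for 800 bytes per INFO2Item, which matches the item_size typically reported
# by XP-era INFO2 headers.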
|
lgpl-3.0
|
shanot/imp
|
modules/rmf/dependency/RMF/tools/dev_tools/python_tools/cpp_format.py
|
2
|
5358
|
"""Use the Python pygments library to perform extra checks on C++ grammar."""
from pygments import token
from pygments.lexers.compiled import CppLexer
import re
import os
def check_header_file(fh_name, project_name, errors):
"""Check a single C++ header file"""
_check_file(fh_name, project_name, True, errors)
def check_cpp_file(fh_name, project_name, errors):
"""Check a single C++ source file"""
_check_file(fh_name, project_name, False, errors)
def _check_file(fh_name, project_name, header, errors):
fh, filename = fh_name
s = tokenize_file(fh)
check_tokens(s, filename, project_name, header, errors)
def tokenize_file(fh):
"""Use the Python pygments library to tokenize a C++ file"""
code = fh.read()
c = CppLexer()
scan = []
for (index, tok, value) in c.get_tokens_unprocessed(code):
scan.append((tok, value))
return scan
def check_tokens(scan, filename, project_name, header, errors):
if filename.find("test_") == -1:
# we don't do it for python tests
check_comment_header(scan, filename, errors)
if header:
# Handle older versions of pygments which concatenate \n and # tokens
if len(scan) >= 3 and scan[2][0] == token.Comment.Preproc \
and scan[2][1] == '\n#':
scan[2] = (token.Comment.Preproc, '#')
scan.insert(2, (token.Comment.Text, '\n'))
check_header_start_end(scan, filename, project_name, errors)
def check_comment_header(scan, filename, errors):
if len(scan) < 1 or scan[0][0] not in (token.Comment,
token.Comment.Multiline):
errors.append('%s:1: First line should be a comment ' % filename +
'with a copyright notice and a description of the file')
def have_header_guard(scan):
return len(scan) >= 11 \
and scan[4][0] == token.Comment.Preproc \
and scan[4][1].startswith('ifndef') \
and scan[7][0] == token.Comment.Preproc \
and scan[7][1].startswith('define') \
and scan[-3][0] == token.Comment.Preproc \
and scan[-3][1].startswith('endif') \
and scan[-2][0] in (token.Comment, token.Comment.Multiline)
def get_header_guard(filename, project_name):
"""Get prefix and suffix for header guard"""
guard_prefix = project_name.replace(".", "").upper()
guard_suffix = os.path.split(filename)[1].replace(".", "_").upper()
return guard_prefix, guard_suffix
def check_header_start_end(scan, filename, project_name, errors):
guard_prefix, guard_suffix = get_header_guard(filename, project_name)
header_guard = guard_prefix + '_' + guard_suffix
if len(scan) < 11:
bad = True
else:
bad = False
if not scan[4][0] == token.Comment.Preproc:
bad = True
if not scan[4][1].startswith('ifndef'):
errors.append('%s:%d: Header guard missing #ifndef.'
% (filename, 1))
bad = True
if not scan[7][0] == token.Comment.Preproc:
bad = True
if not scan[7][1].startswith('define'):
errors.append('%s:%d: Header guard missing #define.'
% (filename, 1))
bad = True
if not scan[-3][0] == token.Comment.Preproc \
and not scan[-4][0] == token.Comment.Preproc:
bad = True
if not scan[-3][1].startswith('endif') \
and not scan[-4][1].startswith('endif'):
errors.append('%s:%d: Header guard missing #endif.'
% (filename, 1))
bad = True
if not scan[-2][0] in (token.Comment, token.Comment.Multiline) \
and not scan[-3][0] in (token.Comment, token.Comment.Multiline):
errors.append('%s:%d: Header guard missing closing comment.'
% (filename, 1))
bad = True
guard = scan[4][1][7:]
if not guard.startswith(guard_prefix):
errors.append('%s:%d: Header guard does not start with "%s".'
% (filename, 1, guard_prefix))
bad = True
if not guard.replace("_", "").endswith(guard_suffix.replace("_", "")):
errors.append('%s:%d: Header guard does not end with "%s".'
% (filename, 1, guard_suffix))
bad = True
if not scan[7][1] == 'define ' + guard:
errors.append('%s:%d: Header guard does not define "%s".'
% (filename, 1, guard))
bad = True
if not scan[-2][1] == '/* %s */' % guard \
and not scan[-3][1] == '/* %s */' % guard:
errors.append('%s:%d: Header guard close does not have a '
'comment of "/* %s */".' % (filename, 1, guard))
bad = True
if bad:
errors.append('%s:%d: Missing or incomplete header guard.'
% (filename, 1) + """
Header files should start with a comment, then a blank line, then the rest
of the file wrapped with a header guard. This must start with %s
and end with %s - in between can be placed extra qualifiers, e.g. for a
namespace. For example,
/** Copyright and file description */
#ifndef %s
#define %s
...
#endif /* %s */
""" % (guard_prefix, guard_suffix, header_guard, header_guard, header_guard))
|
gpl-3.0
|
shangwuhencc/scikit-learn
|
sklearn/decomposition/tests/test_incremental_pca.py
|
297
|
8265
|
"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that partial_fit raises an error when n_components is changed
    # via set_params after fitting.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features between fit and partial_fit
    # raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
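# Illustrative sketch (not one of the test cases): the chunked partial_fit
# pattern that the tests above exercise, for data processed in batches.
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(1000, 20)
#   ipca = IncrementalPCA(n_components=5)
#   for chunk in np.array_split(X, 10):
#       ipca.partial_fit(chunk)
#   X_reduced = ipca.transform(X)   # shape (1000, 5)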
|
bsd-3-clause
|
pheanex/xpython
|
exercises/point-mutations/point_mutations_test.py
|
2
|
1286
|
import unittest
from point_mutations import hamming_distance
class DNATest(unittest.TestCase):
def test_no_difference_between_empty_strands(self):
self.assertEqual(hamming_distance('', ''), 0)
def test_no_difference_between_identical_strands(self):
self.assertEqual(hamming_distance('GGACTGA', 'GGACTGA'), 0)
def test_complete_hamming_distance_in_small_strand(self):
self.assertEqual(hamming_distance('ACT', 'GGA'), 3)
def test_hamming_distance_in_off_by_one_strand(self):
self.assertEqual(
hamming_distance('GGACGGATTCTGACCTGGACTAATTTTGGGG',
'AGGACGGATTCTGACCTGGACTAATTTTGGGG'), 19)
def test_small_hamming_distance_in_middle_somewhere(self):
self.assertEqual(hamming_distance('GGACG', 'GGTCG'), 1)
def test_larger_distance(self):
self.assertEqual(hamming_distance('ACCAGGG', 'ACTATGG'), 2)
def test_ignores_extra_length_on_other_strand_when_longer(self):
self.assertEqual(hamming_distance('AAACTAGGGG', 'AGGCTAGCGGTAGGAC'), 3)
def test_ignores_extra_length_on_original_strand_when_longer(self):
self.assertEqual(
hamming_distance('GACTACGGACAGGGTAGGGAAT', 'GACATCGCACACC'), 5)
if __name__ == '__main__':
unittest.main()
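# Reference sketch of the function under test (the exercise solution lives in
# point_mutations.py; this version simply mirrors what the cases above expect,
# including ignoring extra length on the longer strand via zip()):
#
#   def hamming_distance(strand_a, strand_b):
#       return sum(a != b for a, b in zip(strand_a, strand_b))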
|
mit
|
ptoraskar/django
|
django/contrib/gis/utils/layermapping.py
|
335
|
27300
|
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, six.string_types):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out before hand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except GDALException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.remote_field.model
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, six.string_types)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
    # #### Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
# #### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as msg:
new_msg = 'Could not translate between the data source and model geometry: %s' % msg
six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
          progress information will be printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
# special (e.g, [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
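# Usage sketch (not part of the original module; the model, shapefile path and
# field names are hypothetical -- see the GeoDjango docs referenced in the
# module docstring for a full walkthrough):
#
#   from geoapp.models import WorldBorder
#   mapping = {'name': 'NAME', 'mpoly': 'MULTIPOLYGON'}
#   lm = LayerMapping(WorldBorder, '/data/world_borders.shp', mapping,
#                     transform=False, encoding='iso-8859-1')
#   lm.save(strict=True, verbose=True)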
|
bsd-3-clause
|
swenson/sagewiki
|
unidecode/unidecode/x070.py
|
252
|
4693
|
data = (
'You ', # 0x00
'Yang ', # 0x01
'Lu ', # 0x02
'Si ', # 0x03
'Jie ', # 0x04
'Ying ', # 0x05
'Du ', # 0x06
'Wang ', # 0x07
'Hui ', # 0x08
'Xie ', # 0x09
'Pan ', # 0x0a
'Shen ', # 0x0b
'Biao ', # 0x0c
'Chan ', # 0x0d
'Mo ', # 0x0e
'Liu ', # 0x0f
'Jian ', # 0x10
'Pu ', # 0x11
'Se ', # 0x12
'Cheng ', # 0x13
'Gu ', # 0x14
'Bin ', # 0x15
'Huo ', # 0x16
'Xian ', # 0x17
'Lu ', # 0x18
'Qin ', # 0x19
'Han ', # 0x1a
'Ying ', # 0x1b
'Yong ', # 0x1c
'Li ', # 0x1d
'Jing ', # 0x1e
'Xiao ', # 0x1f
'Ying ', # 0x20
'Sui ', # 0x21
'Wei ', # 0x22
'Xie ', # 0x23
'Huai ', # 0x24
'Hao ', # 0x25
'Zhu ', # 0x26
'Long ', # 0x27
'Lai ', # 0x28
'Dui ', # 0x29
'Fan ', # 0x2a
'Hu ', # 0x2b
'Lai ', # 0x2c
'[?] ', # 0x2d
'[?] ', # 0x2e
'Ying ', # 0x2f
'Mi ', # 0x30
'Ji ', # 0x31
'Lian ', # 0x32
'Jian ', # 0x33
'Ying ', # 0x34
'Fen ', # 0x35
'Lin ', # 0x36
'Yi ', # 0x37
'Jian ', # 0x38
'Yue ', # 0x39
'Chan ', # 0x3a
'Dai ', # 0x3b
'Rang ', # 0x3c
'Jian ', # 0x3d
'Lan ', # 0x3e
'Fan ', # 0x3f
'Shuang ', # 0x40
'Yuan ', # 0x41
'Zhuo ', # 0x42
'Feng ', # 0x43
'She ', # 0x44
'Lei ', # 0x45
'Lan ', # 0x46
'Cong ', # 0x47
'Qu ', # 0x48
'Yong ', # 0x49
'Qian ', # 0x4a
'Fa ', # 0x4b
'Guan ', # 0x4c
'Que ', # 0x4d
'Yan ', # 0x4e
'Hao ', # 0x4f
'Hyeng ', # 0x50
'Sa ', # 0x51
'Zan ', # 0x52
'Luan ', # 0x53
'Yan ', # 0x54
'Li ', # 0x55
'Mi ', # 0x56
'Shan ', # 0x57
'Tan ', # 0x58
'Dang ', # 0x59
'Jiao ', # 0x5a
'Chan ', # 0x5b
'[?] ', # 0x5c
'Hao ', # 0x5d
'Ba ', # 0x5e
'Zhu ', # 0x5f
'Lan ', # 0x60
'Lan ', # 0x61
'Nang ', # 0x62
'Wan ', # 0x63
'Luan ', # 0x64
'Xun ', # 0x65
'Xian ', # 0x66
'Yan ', # 0x67
'Gan ', # 0x68
'Yan ', # 0x69
'Yu ', # 0x6a
'Huo ', # 0x6b
'Si ', # 0x6c
'Mie ', # 0x6d
'Guang ', # 0x6e
'Deng ', # 0x6f
'Hui ', # 0x70
'Xiao ', # 0x71
'Xiao ', # 0x72
'Hu ', # 0x73
'Hong ', # 0x74
'Ling ', # 0x75
'Zao ', # 0x76
'Zhuan ', # 0x77
'Jiu ', # 0x78
'Zha ', # 0x79
'Xie ', # 0x7a
'Chi ', # 0x7b
'Zhuo ', # 0x7c
'Zai ', # 0x7d
'Zai ', # 0x7e
'Can ', # 0x7f
'Yang ', # 0x80
'Qi ', # 0x81
'Zhong ', # 0x82
'Fen ', # 0x83
'Niu ', # 0x84
'Jiong ', # 0x85
'Wen ', # 0x86
'Po ', # 0x87
'Yi ', # 0x88
'Lu ', # 0x89
'Chui ', # 0x8a
'Pi ', # 0x8b
'Kai ', # 0x8c
'Pan ', # 0x8d
'Yan ', # 0x8e
'Kai ', # 0x8f
'Pang ', # 0x90
'Mu ', # 0x91
'Chao ', # 0x92
'Liao ', # 0x93
'Gui ', # 0x94
'Kang ', # 0x95
'Tun ', # 0x96
'Guang ', # 0x97
'Xin ', # 0x98
'Zhi ', # 0x99
'Guang ', # 0x9a
'Guang ', # 0x9b
'Wei ', # 0x9c
'Qiang ', # 0x9d
'[?] ', # 0x9e
'Da ', # 0x9f
'Xia ', # 0xa0
'Zheng ', # 0xa1
'Zhu ', # 0xa2
'Ke ', # 0xa3
'Zhao ', # 0xa4
'Fu ', # 0xa5
'Ba ', # 0xa6
'Duo ', # 0xa7
'Duo ', # 0xa8
'Ling ', # 0xa9
'Zhuo ', # 0xaa
'Xuan ', # 0xab
'Ju ', # 0xac
'Tan ', # 0xad
'Pao ', # 0xae
'Jiong ', # 0xaf
'Pao ', # 0xb0
'Tai ', # 0xb1
'Tai ', # 0xb2
'Bing ', # 0xb3
'Yang ', # 0xb4
'Tong ', # 0xb5
'Han ', # 0xb6
'Zhu ', # 0xb7
'Zha ', # 0xb8
'Dian ', # 0xb9
'Wei ', # 0xba
'Shi ', # 0xbb
'Lian ', # 0xbc
'Chi ', # 0xbd
'Huang ', # 0xbe
'[?] ', # 0xbf
'Hu ', # 0xc0
'Shuo ', # 0xc1
'Lan ', # 0xc2
'Jing ', # 0xc3
'Jiao ', # 0xc4
'Xu ', # 0xc5
'Xing ', # 0xc6
'Quan ', # 0xc7
'Lie ', # 0xc8
'Huan ', # 0xc9
'Yang ', # 0xca
'Xiao ', # 0xcb
'Xiu ', # 0xcc
'Xian ', # 0xcd
'Yin ', # 0xce
'Wu ', # 0xcf
'Zhou ', # 0xd0
'Yao ', # 0xd1
'Shi ', # 0xd2
'Wei ', # 0xd3
'Tong ', # 0xd4
'Xue ', # 0xd5
'Zai ', # 0xd6
'Kai ', # 0xd7
'Hong ', # 0xd8
'Luo ', # 0xd9
'Xia ', # 0xda
'Zhu ', # 0xdb
'Xuan ', # 0xdc
'Zheng ', # 0xdd
'Po ', # 0xde
'Yan ', # 0xdf
'Hui ', # 0xe0
'Guang ', # 0xe1
'Zhe ', # 0xe2
'Hui ', # 0xe3
'Kao ', # 0xe4
'[?] ', # 0xe5
'Fan ', # 0xe6
'Shao ', # 0xe7
'Ye ', # 0xe8
'Hui ', # 0xe9
'[?] ', # 0xea
'Tang ', # 0xeb
'Jin ', # 0xec
'Re ', # 0xed
'[?] ', # 0xee
'Xi ', # 0xef
'Fu ', # 0xf0
'Jiong ', # 0xf1
'Che ', # 0xf2
'Pu ', # 0xf3
'Jing ', # 0xf4
'Zhuo ', # 0xf5
'Ting ', # 0xf6
'Wan ', # 0xf7
'Hai ', # 0xf8
'Peng ', # 0xf9
'Lang ', # 0xfa
'Shan ', # 0xfb
'Hu ', # 0xfc
'Feng ', # 0xfd
'Chi ', # 0xfe
'Rong ', # 0xff
)
|
gpl-2.0
|
jacklopessapo/support-tools
|
googlecode-issues-exporter/bitbucket_issue_converter.py
|
92
|
6834
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for converting Google Code issues to a format accepted by BitBucket.
Most BitBucket concepts map cleanly to their Google Code equivalent, with the
exception of the following:
- Issue Assignee is called an Owner
- Issue Reporter is called an Author
- Comment User is called an Author
"""
import argparse
import json
import sys
import issues
def _getKind(kind):
mapping = {
"defect": "bug",
"enhancement": "enhancement",
"task": "task",
"review": "proposal",
"other": "bug",
}
return mapping.get(kind.lower(), "bug")
def _getPriority(priority):
mapping = {
"low": "trivial",
"medium": "minor",
"high": "major",
"critical": "critical",
}
return mapping.get(priority.lower(), "minor")
def _getStatus(status):
mapping = {
"new": "new",
"fixed": "resolved",
"invalid": "invalid",
"duplicate": "duplicate",
"wontfix": "wontfix",
}
return mapping.get(status.lower(), "new")
def _getTitle(title):
if len(title) < 255:
return title
return title[:250] + "[...]"
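# --- Editor's note: small illustrative checks for the normalizers above
# (not part of the original tool). Unrecognized inputs fall back to each
# mapping's default value. ---
def _demo_normalizers():
  assert _getKind("Defect") == "bug"
  assert _getKind("something-else") == "bug"
  assert _getPriority("High") == "major"
  assert _getStatus("WontFix") == "wontfix"
  assert len(_getTitle("x" * 300)) == 255  # 250 chars + "[...]"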
class UserService(issues.UserService):
"""BitBucket user operations.
"""
def IsUser(self, username):
"""Returns wheter a username is a valid user.
BitBucket does not have a user api, so accept all usernames.
"""
return True
class IssueService(issues.IssueService):
"""Abstract issue operations.
Handles creating and updating issues and comments on an user API.
"""
def __init__(self):
self._bitbucket_issues = []
self._bitbucket_comments = []
def GetIssues(self, state="open"):
"""Gets all of the issue for the repository.
Since BitBucket does not have an issue API, always returns an empty list.
Args:
state: The state of the repository can be either 'open' or 'closed'.
Returns:
An empty list.
"""
return []
def CreateIssue(self, googlecode_issue):
"""Creates an issue.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number of the new issue.
Raises:
ServiceError: An error occurred creating the issue.
"""
bitbucket_issue = {
"assignee": googlecode_issue.GetOwner(),
"content": googlecode_issue.GetDescription(),
"content_updated_on": googlecode_issue.GetContentUpdatedOn(),
"created_on": googlecode_issue.GetCreatedOn(),
"id": googlecode_issue.GetId(),
"kind": _getKind(googlecode_issue.GetKind()),
"priority": _getPriority(googlecode_issue.GetPriority()),
"reporter": googlecode_issue.GetAuthor(),
"status": _getStatus(googlecode_issue.GetStatus()),
"title": _getTitle(googlecode_issue.GetTitle()),
"updated_on": googlecode_issue.GetUpdatedOn()
}
self._bitbucket_issues.append(bitbucket_issue)
return googlecode_issue.GetId()
def CloseIssue(self, issue_number):
"""Closes an issue.
Args:
issue_number: The issue number.
"""
def CreateComment(self, issue_number, googlecode_comment):
"""Creates a comment on an issue.
Args:
issue_number: The issue number.
googlecode_comment: An instance of GoogleCodeComment
"""
bitbucket_comment = {
"content": googlecode_comment.GetDescription(),
"created_on": googlecode_comment.GetCreatedOn(),
"id": googlecode_comment.GetId(),
"issue": googlecode_comment.GetIssue().GetId(),
"updated_on": googlecode_comment.GetUpdatedOn(),
"user": googlecode_comment.GetAuthor()
}
self._bitbucket_comments.append(bitbucket_comment)
def WriteIssueData(self, default_issue_kind):
"""Writes out the json issue and comments data to db-1.0.json.
"""
issues_data = {
"issues": self._bitbucket_issues,
"comments": self._bitbucket_comments,
"meta": {
"default_kind": default_issue_kind
}
}
with open("db-1.0.json", "w") as issues_file:
issues_json = json.dumps(issues_data, sort_keys=True, indent=4,
separators=(",", ": "))
issues_file.write(issues_json)
def ExportIssues(issue_file_path, project_name,
user_file_path, default_issue_kind):
"""Exports all issues for a given project.
"""
issue_service = IssueService()
user_service = UserService()
issue_data = issues.LoadIssueData(issue_file_path, project_name)
user_map = issues.LoadUserData(user_file_path, user_service)
issue_exporter = issues.IssueExporter(
issue_service, user_service, issue_data, project_name, user_map)
try:
issue_exporter.Init()
issue_exporter.Start()
issue_service.WriteIssueData(default_issue_kind)
print "\nDone!\n"
except IOError, e:
print "[IOError] ERROR: %s" % e
except issues.InvalidUserError, e:
print "[InvalidUserError] ERROR: %s" % e
def main(args):
"""The main function.
Args:
args: The command line arguments.
Raises:
ProjectNotFoundError: The user passed in an invalid project name.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--issue_file_path", required=True,
help="The path to the file containing the issues from"
"Google Code.")
parser.add_argument("--project_name", required=True,
help="The name of the Google Code project you wish to"
"export")
parser.add_argument("--user_file_path", required=False,
help="The path to the file containing a mapping from"
"email address to bitbucket username")
parser.add_argument("--default_issue_kind", required=False,
help="A non-null string containing one of the following"
"values: bug, enhancement, proposal, task. Defaults to"
"bug")
parsed_args, _ = parser.parse_known_args(args)
# Default value.
if not parsed_args.default_issue_kind:
print "Using default issue kind of 'bug'."
parsed_args.default_issue_kind = "bug"
ExportIssues(
parsed_args.issue_file_path, parsed_args.project_name,
parsed_args.user_file_path, parsed_args.default_issue_kind)
if __name__ == "__main__":
main(sys.argv)
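# --- Editor's note: example invocation (not from the original file). The
# flag names match the argparse definitions above; the file paths and the
# project name are placeholders. Output is written to db-1.0.json. ---
#
#   python bitbucket_issue_converter.py \
#       --issue_file_path=issues.json \
#       --project_name=my-googlecode-project \
#       --user_file_path=users.json \
#       --default_issue_kind=bug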
|
apache-2.0
|
patrickm/chromium.src
|
build/android/test_runner.py
|
2
|
28778
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all types of tests from one unified interface."""
import collections
import logging
import optparse
import os
import shutil
import signal
import sys
import threading
from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import command_option_parser
from pylib.utils import report_results
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper
def AddCommonOptions(option_parser):
"""Adds all common options to |option_parser|."""
group = optparse.OptionGroup(option_parser, 'Common Options')
default_build_type = os.environ.get('BUILDTYPE', 'Debug')
group.add_option('--debug', action='store_const', const='Debug',
dest='build_type', default=default_build_type,
help=('If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug.'))
group.add_option('--release', action='store_const',
const='Release', dest='build_type',
help=('If set, run test suites under out/Release.'
' Default is env var BUILDTYPE or Debug.'))
group.add_option('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true')
group.add_option('--num_retries', dest='num_retries', type='int',
default=2,
help=('Number of retries for a test before '
'giving up.'))
group.add_option('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
group.add_option('--tool',
dest='tool',
help=('Run the test under a tool '
'(use --tool help to list them)'))
group.add_option('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
group.add_option('--skip-deps-push', dest='push_deps',
action='store_false', default=True,
help=('Do not push dependencies to the device. '
'Use this at your own risk for speeding up test '
'execution on local machine.'))
group.add_option('-d', '--device', dest='test_device',
help=('Target device for the test suite '
'to run on.'))
option_parser.add_option_group(group)
def ProcessCommonOptions(options):
"""Processes and handles all common options."""
run_tests_helper.SetLogLevel(options.verbose_count)
constants.SetBuildType(options.build_type)
def AddGTestOptions(option_parser):
"""Adds gtest options to |option_parser|."""
option_parser.usage = '%prog gtest [options]'
option_parser.commands_dict = {}
option_parser.example = '%prog gtest -s base_unittests'
# TODO(gkanwar): Make this option required
option_parser.add_option('-s', '--suite', dest='suite_name',
help=('Executable name of the test suite to run '
'(use -s help to list them).'))
option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
dest='test_filter',
help='googletest-style filter string.')
option_parser.add_option('--gtest_also_run_disabled_tests',
'--gtest-also-run-disabled-tests',
dest='run_disabled', action='store_true',
help='Also run disabled tests if applicable.')
option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
default='',
help='Additional arguments to pass to the test.')
option_parser.add_option('-t', dest='timeout',
help='Timeout to wait for each test',
type='int',
default=60)
# TODO(gkanwar): Move these to Common Options once we have the plumbing
# in our other test types to handle these commands
AddCommonOptions(option_parser)
def AddLinkerTestOptions(option_parser):
option_parser.usage = '%prog linker'
option_parser.commands_dict = {}
option_parser.example = '%prog linker'
option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
help='googletest-style filter string.')
AddCommonOptions(option_parser)
def ProcessGTestOptions(options):
"""Intercept test suite help to list test suites.
Args:
options: Command line options.
"""
if options.suite_name == 'help':
print 'Available test suites are:'
for test_suite in (gtest_config.STABLE_TEST_SUITES +
gtest_config.EXPERIMENTAL_TEST_SUITES):
print test_suite
sys.exit(0)
# Convert to a list, assuming all test suites if nothing was specified.
# TODO(gkanwar): Require having a test suite
if options.suite_name:
options.suite_name = [options.suite_name]
else:
options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]
def AddJavaTestOptions(option_parser):
"""Adds the Java test options to |option_parser|."""
option_parser.add_option('-f', '--test-filter', dest='test_filter',
help=('Test filter (if not fully qualified, '
'will run all matches).'))
option_parser.add_option(
'-A', '--annotation', dest='annotation_str',
help=('Comma-separated list of annotations. Run only tests with any of '
'the given annotations. An annotation can be either a key or a '
'key-values pair. A test that has no annotation is considered '
'"SmallTest".'))
option_parser.add_option(
'-E', '--exclude-annotation', dest='exclude_annotation_str',
help=('Comma-separated list of annotations. Exclude tests with these '
'annotations.'))
option_parser.add_option('--screenshot', dest='screenshot_failures',
action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option('--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
option_parser.add_option('--official-build', action='store_true',
help='Run official build tests.')
option_parser.add_option('--test_data', action='append', default=[],
help=('Each instance defines a directory of test '
'data that should be copied to the target(s) '
'before running the tests. The argument '
'should be of the form <target>:<source>, '
'<target> is relative to the device data '
'directory, and <source> is relative to the '
'chromium build directory.'))
def ProcessJavaTestOptions(options):
"""Processes options/arguments and populates |options| with defaults."""
if options.annotation_str:
options.annotations = options.annotation_str.split(',')
elif options.test_filter:
options.annotations = []
else:
options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
'EnormousTest']
if options.exclude_annotation_str:
options.exclude_annotations = options.exclude_annotation_str.split(',')
else:
options.exclude_annotations = []
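# --- Editor's note: illustrative summary of how the annotation flags above
# are interpreted (not part of the original script). ---
#   -A Smoke,SmallTest       -> options.annotations == ['Smoke', 'SmallTest']
#   neither -A nor -f given  -> the default list ['Smoke', 'SmallTest',
#                               'MediumTest', 'LargeTest', 'EnormousTest']
#   -E FlakyTest             -> options.exclude_annotations == ['FlakyTest']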
def AddInstrumentationTestOptions(option_parser):
"""Adds Instrumentation test options to |option_parser|."""
option_parser.usage = '%prog instrumentation [options]'
option_parser.commands_dict = {}
option_parser.example = ('%prog instrumentation '
'--test-apk=ChromeShellTest')
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
option_parser.add_option('-j', '--java-only', action='store_true',
default=False, help='Run only the Java tests.')
option_parser.add_option('-p', '--python-only', action='store_true',
default=False,
help='Run only the host-driven tests.')
option_parser.add_option('--host-driven-root',
help='Root of the host-driven tests.')
option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true',
help='Wait for debugger.')
option_parser.add_option(
'--test-apk', dest='test_apk',
help=('The name of the apk containing the tests '
'(without the .apk extension; e.g. "ContentShellTest").'))
option_parser.add_option('--coverage-dir',
help=('Directory in which to place all generated '
'EMMA coverage files.'))
def ProcessInstrumentationOptions(options, error_func):
"""Processes options/arguments and populate |options| with defaults.
Args:
options: optparse.Options object.
error_func: Function to call with the error message in case of an error.
Returns:
An InstrumentationOptions named tuple which contains all options relevant to
instrumentation tests.
"""
ProcessJavaTestOptions(options)
if options.java_only and options.python_only:
error_func('Options java_only (-j) and python_only (-p) '
'are mutually exclusive.')
options.run_java_tests = True
options.run_python_tests = True
if options.java_only:
options.run_python_tests = False
elif options.python_only:
options.run_java_tests = False
if not options.host_driven_root:
options.run_python_tests = False
if not options.test_apk:
error_func('--test-apk must be specified.')
options.test_apk_path = os.path.join(constants.GetOutDirectory(),
constants.SDK_BUILD_APKS_DIR,
'%s.apk' % options.test_apk)
options.test_apk_jar_path = os.path.join(
constants.GetOutDirectory(),
constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % options.test_apk)
return instrumentation_test_options.InstrumentationOptions(
options.tool,
options.cleanup_test_files,
options.push_deps,
options.annotations,
options.exclude_annotations,
options.test_filter,
options.test_data,
options.save_perf_json,
options.screenshot_failures,
options.wait_for_debugger,
options.coverage_dir,
options.test_apk,
options.test_apk_path,
options.test_apk_jar_path)
def AddUIAutomatorTestOptions(option_parser):
"""Adds UI Automator test options to |option_parser|."""
option_parser.usage = '%prog uiautomator [options]'
option_parser.commands_dict = {}
option_parser.example = (
'%prog uiautomator --test-jar=chrome_shell_uiautomator_tests'
' --package=chrome_shell')
option_parser.add_option(
'--package',
help=('Package under test. Possible values: %s' %
constants.PACKAGE_INFO.keys()))
option_parser.add_option(
'--test-jar', dest='test_jar',
help=('The name of the dexed jar containing the tests (without the '
'.dex.jar extension). Alternatively, this can be a full path '
'to the jar.'))
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
def ProcessUIAutomatorOptions(options, error_func):
"""Processes UIAutomator options/arguments.
Args:
options: optparse.Options object.
error_func: Function to call with the error message in case of an error.
Returns:
A UIAutomatorOptions named tuple which contains all options relevant to
uiautomator tests.
"""
ProcessJavaTestOptions(options)
if not options.package:
error_func('--package is required.')
if options.package not in constants.PACKAGE_INFO:
error_func('Invalid package.')
if not options.test_jar:
error_func('--test-jar must be specified.')
if os.path.exists(options.test_jar):
# The dexed JAR is fully qualified, assume the info JAR lives along side.
options.uiautomator_jar = options.test_jar
else:
options.uiautomator_jar = os.path.join(
constants.GetOutDirectory(),
constants.SDK_BUILD_JAVALIB_DIR,
'%s.dex.jar' % options.test_jar)
options.uiautomator_info_jar = (
options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
'_java.jar')
return uiautomator_test_options.UIAutomatorOptions(
options.tool,
options.cleanup_test_files,
options.push_deps,
options.annotations,
options.exclude_annotations,
options.test_filter,
options.test_data,
options.save_perf_json,
options.screenshot_failures,
options.uiautomator_jar,
options.uiautomator_info_jar,
options.package)
def AddMonkeyTestOptions(option_parser):
"""Adds monkey test options to |option_parser|."""
option_parser.usage = '%prog monkey [options]'
option_parser.commands_dict = {}
option_parser.example = (
'%prog monkey --package=chrome_shell')
option_parser.add_option(
'--package',
help=('Package under test. Possible values: %s' %
constants.PACKAGE_INFO.keys()))
option_parser.add_option(
'--event-count', default=10000, type='int',
help='Number of events to generate [default: %default].')
option_parser.add_option(
'--category', default='',
help='A list of allowed categories.')
option_parser.add_option(
'--throttle', default=100, type='int',
help='Delay between events (ms) [default: %default]. ')
option_parser.add_option(
'--seed', type='int',
help=('Seed value for pseudo-random generator. Same seed value generates '
'the same sequence of events. Seed is randomized by default.'))
option_parser.add_option(
'--extra-args', default='',
help=('String of other args to pass to the command verbatim '
'[default: "%default"].'))
AddCommonOptions(option_parser)
def ProcessMonkeyTestOptions(options, error_func):
"""Processes all monkey test options.
Args:
options: optparse.Options object.
error_func: Function to call with the error message in case of an error.
Returns:
A MonkeyOptions named tuple which contains all options relevant to
monkey tests.
"""
if not options.package:
error_func('--package is required.')
if options.package not in constants.PACKAGE_INFO:
error_func('Invalid package.')
category = options.category
if category:
category = options.category.split(',')
return monkey_test_options.MonkeyOptions(
options.verbose_count,
options.package,
options.event_count,
category,
options.throttle,
options.seed,
options.extra_args)
def AddPerfTestOptions(option_parser):
"""Adds perf test options to |option_parser|."""
option_parser.usage = '%prog perf [options]'
option_parser.commands_dict = {}
option_parser.example = ('%prog perf '
'[--single-step -- command args] or '
'[--steps perf_steps.json] or '
'[--print-step step]')
option_parser.add_option(
'--single-step',
action='store_true',
help='Execute the given command with retries, but only print the result '
'for the "most successful" round.')
option_parser.add_option(
'--steps',
help='JSON file containing the list of commands to run.')
option_parser.add_option(
'--flaky-steps',
help=('A JSON file containing steps that are flaky '
'and will have their exit codes ignored.'))
option_parser.add_option(
'--print-step',
help='The name of a previously executed perf step to print.')
option_parser.add_option(
'--no-timeout', action='store_true',
help=('Do not impose a timeout. Each perf step is responsible for '
'implementing the timeout logic.'))
option_parser.add_option(
'-f', '--test-filter',
help=('Test filter (will match against the names listed in --steps).'))
option_parser.add_option(
'--dry-run',
action='store_true',
help='Just print the steps without executing.')
AddCommonOptions(option_parser)
def ProcessPerfTestOptions(options, args, error_func):
"""Processes all perf test options.
Args:
options: optparse.Options object.
error_func: Function to call with the error message in case of an error.
Returns:
A PerfOptions named tuple which contains all options relevant to
perf tests.
"""
# Only one of steps, print_step or single_step must be provided.
count = len(filter(None,
[options.steps, options.print_step, options.single_step]))
if count != 1:
error_func('Please specify one of: --steps, --print-step, --single-step.')
single_step = None
if options.single_step:
single_step = ' '.join(args[2:])
return perf_test_options.PerfOptions(
options.steps, options.flaky_steps, options.print_step,
options.no_timeout, options.test_filter, options.dry_run,
single_step)
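# --- Editor's note: schematic restatement of the mode check above
# (illustrative only): exactly one of the three perf flags may be set. ---
#   modes = [options.steps, options.print_step, options.single_step]
#   assert len([m for m in modes if m]) == 1, 'pick exactly one perf mode'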
def _RunGTests(options, devices):
"""Subcommand of RunTestsCommands which runs gtests."""
ProcessGTestOptions(options)
exit_code = 0
for suite_name in options.suite_name:
# TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
# the gtest command.
gtest_options = gtest_test_options.GTestOptions(
options.tool,
options.cleanup_test_files,
options.push_deps,
options.test_filter,
options.run_disabled,
options.test_arguments,
options.timeout,
suite_name)
runner_factory, tests = gtest_setup.Setup(gtest_options, devices)
results, test_exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
report_results.LogFull(
results=results,
test_type='Unit test',
test_package=suite_name,
flakiness_server=options.flakiness_dashboard_server)
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
shutil.rmtree(constants.ISOLATE_DEPS_DIR)
return exit_code
def _RunLinkerTests(options, devices):
"""Subcommand of RunTestsCommands which runs linker tests."""
runner_factory, tests = linker_setup.Setup(options, devices)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=60,
num_retries=options.num_retries)
report_results.LogFull(
results=results,
test_type='Linker test',
test_package='ChromiumLinkerTest')
return exit_code
def _RunInstrumentationTests(options, error_func, devices):
"""Subcommand of RunTestsCommands which runs instrumentation tests."""
instrumentation_options = ProcessInstrumentationOptions(options, error_func)
if len(devices) > 1 and options.wait_for_debugger:
logging.warning('Debugger can not be sharded, using first available device')
devices = devices[:1]
results = base_test_result.TestRunResults()
exit_code = 0
if options.run_java_tests:
runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)
test_results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
results.AddTestRunResults(test_results)
if options.run_python_tests:
runner_factory, tests = host_driven_setup.InstrumentationSetup(
options.host_driven_root, options.official_build,
instrumentation_options)
if tests:
test_results, test_exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
results.AddTestRunResults(test_results)
# Only allow exit code escalation
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
report_results.LogFull(
results=results,
test_type='Instrumentation',
test_package=os.path.basename(options.test_apk),
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
return exit_code
def _RunUIAutomatorTests(options, error_func, devices):
"""Subcommand of RunTestsCommands which runs uiautomator tests."""
uiautomator_options = ProcessUIAutomatorOptions(options, error_func)
runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
report_results.LogFull(
results=results,
test_type='UIAutomator',
test_package=os.path.basename(options.test_jar),
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
return exit_code
def _RunMonkeyTests(options, error_func, devices):
"""Subcommand of RunTestsCommands which runs monkey tests."""
monkey_options = ProcessMonkeyTestOptions(options, error_func)
runner_factory, tests = monkey_setup.Setup(monkey_options)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=False, test_timeout=None,
num_retries=options.num_retries)
report_results.LogFull(
results=results,
test_type='Monkey',
test_package='Monkey')
return exit_code
def _RunPerfTests(options, args, error_func, devices):
"""Subcommand of RunTestsCommands which runs perf tests."""
perf_options = ProcessPerfTestOptions(options, args, error_func)
# Just print the results from a single previously executed step.
if perf_options.print_step:
return perf_test_runner.PrintTestOutput(perf_options.print_step)
runner_factory, tests = perf_setup.Setup(perf_options)
results, _ = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
report_results.LogFull(
results=results,
test_type='Perf',
test_package='Perf')
if perf_options.single_step:
return perf_test_runner.PrintTestOutput('single_step')
perf_test_runner.PrintSummary(tests)
# Always return 0 on the sharding stage. Individual tests exit_code
# will be returned on the print_step stage.
return 0
def _GetAttachedDevices(test_device=None):
"""Get all attached devices.
Args:
test_device: Name of a specific device to use.
Returns:
A list of attached devices.
"""
attached_devices = []
attached_devices = android_commands.GetAttachedDevices()
if test_device:
assert test_device in attached_devices, (
'Did not find device %s among attached devices. Attached devices: %s'
% (test_device, ', '.join(attached_devices)))
attached_devices = [test_device]
assert attached_devices, 'No devices attached.'
return sorted(attached_devices)
def RunTestsCommand(command, options, args, option_parser):
"""Checks test type and dispatches to the appropriate function.
Args:
command: String indicating the command that was received to trigger
this function.
options: optparse options dictionary.
args: List of extra args from optparse.
option_parser: optparse.OptionParser object.
Returns:
Integer indicating exit code.
Raises:
Exception: Unknown command name passed in, or an exception from an
individual test runner.
"""
# Check for extra arguments
if len(args) > 2 and command != 'perf':
option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
return constants.ERROR_EXIT_CODE
if command == 'perf':
if ((options.single_step and len(args) <= 2) or
(not options.single_step and len(args) > 2)):
option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
return constants.ERROR_EXIT_CODE
ProcessCommonOptions(options)
devices = _GetAttachedDevices(options.test_device)
forwarder.Forwarder.RemoveHostLog()
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
if command == 'gtest':
return _RunGTests(options, devices)
elif command == 'linker':
return _RunLinkerTests(options, devices)
elif command == 'instrumentation':
return _RunInstrumentationTests(options, option_parser.error, devices)
elif command == 'uiautomator':
return _RunUIAutomatorTests(options, option_parser.error, devices)
elif command == 'monkey':
return _RunMonkeyTests(options, option_parser.error, devices)
elif command == 'perf':
return _RunPerfTests(options, args, option_parser.error, devices)
else:
raise Exception('Unknown test type.')
def HelpCommand(command, _options, args, option_parser):
"""Display help for a certain command, or overall help.
Args:
command: String indicating the command that was received to trigger
this function.
options: optparse options dictionary. unused.
args: List of extra args from optparse.
option_parser: optparse.OptionParser object.
Returns:
Integer indicating exit code.
"""
# If we don't have any args, display overall help
if len(args) < 3:
option_parser.print_help()
return 0
# If we have too many args, print an error
if len(args) > 3:
option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
return constants.ERROR_EXIT_CODE
command = args[2]
if command not in VALID_COMMANDS:
option_parser.error('Unrecognized command.')
# Treat the help command as a special case. We don't care about showing a
# specific help page for itself.
if command == 'help':
option_parser.print_help()
return 0
VALID_COMMANDS[command].add_options_func(option_parser)
option_parser.usage = '%prog ' + command + ' [options]'
option_parser.commands_dict = {}
option_parser.print_help()
return 0
# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
VALID_COMMANDS = {
'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
'instrumentation': CommandFunctionTuple(
AddInstrumentationTestOptions, RunTestsCommand),
'uiautomator': CommandFunctionTuple(
AddUIAutomatorTestOptions, RunTestsCommand),
'monkey': CommandFunctionTuple(
AddMonkeyTestOptions, RunTestsCommand),
'perf': CommandFunctionTuple(
AddPerfTestOptions, RunTestsCommand),
'linker': CommandFunctionTuple(
AddLinkerTestOptions, RunTestsCommand),
'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
}
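# --- Editor's note: a schematic sketch (not the real command_option_parser
# implementation) of how the VALID_COMMANDS table is typically consumed:
# look up the command, let it register its options, then run it. ---
def _dispatch_sketch(argv):
  command = argv[1] if len(argv) > 1 else 'help'
  entry = VALID_COMMANDS.get(command)
  if not entry:
    return constants.ERROR_EXIT_CODE
  parser = optparse.OptionParser()
  entry.add_options_func(parser)
  options, args = parser.parse_args(argv)
  return entry.run_command_func(command, options, args, parser)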
def DumpThreadStacks(_signal, _frame):
for thread in threading.enumerate():
reraiser_thread.LogThreadStack(thread)
def main():
signal.signal(signal.SIGUSR1, DumpThreadStacks)
option_parser = command_option_parser.CommandOptionParser(
commands_dict=VALID_COMMANDS)
return command_option_parser.ParseAndExecute(option_parser)
if __name__ == '__main__':
sys.exit(main())
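# --- Editor's note: example invocations (not part of the original file);
# the suite and apk names are the ones used in the option examples above. ---
#   python test_runner.py gtest -s base_unittests --release
#   python test_runner.py instrumentation --test-apk=ChromeShellTest
#   python test_runner.py help gtest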
|
bsd-3-clause
|
jkankiewicz/kivy
|
examples/canvas/lines_extended.py
|
21
|
3977
|
'''
Lines Extended Demo
===================
This demonstrates how to use the extended line drawing routines such
as circles, ellipses, and rectangles. You should see a static image of
labelled shapes on the screen.
'''
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
from kivy.lang import Builder
Builder.load_string('''
<LineEllipse1>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Ellipse'
<LineEllipse2>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 180)
Label:
center: root.center
text: 'Ellipse from 90 to 180'
# fun result with low segments!
<LineEllipse3>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 720, 10)
Label:
center: root.center
text: 'Ellipse from 90 to 720\\n10 segments'
halign: 'center'
<LineCircle1>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle:
(self.center_x, self.center_y, min(self.width, self.height)
/ 2)
Label:
center: root.center
text: 'Circle'
<LineCircle2>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle:
(self.center_x, self.center_y, min(self.width, self.height)
/ 2, 90, 180)
Label:
center: root.center
text: 'Circle from 90 to 180'
<LineCircle3>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle:
(self.center_x, self.center_y, min(self.width, self.height)
/ 2, 90, 180, 10)
Label:
center: root.center
text: 'Circle from 90 to 180\\n10 segments'
halign: 'center'
<LineCircle4>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle:
(self.center_x, self.center_y, min(self.width, self.height)
/ 2, 0, 360)
Label:
center: root.center
text: 'Circle from 0 to 360'
halign: 'center'
<LineRectangle>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
rectangle: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Rectangle'
<LineBezier>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
bezier:
(self.x, self.y, self.center_x - 40, self.y + 100,
self.center_x + 40, self.y - 100, self.right, self.y)
Label:
center: root.center
text: 'Bezier'
''')
class LineEllipse1(Widget):
pass
class LineEllipse2(Widget):
pass
class LineEllipse3(Widget):
pass
class LineCircle1(Widget):
pass
class LineCircle2(Widget):
pass
class LineCircle3(Widget):
pass
class LineCircle4(Widget):
pass
class LineRectangle(Widget):
pass
class LineBezier(Widget):
pass
class LineExtendedApp(App):
def build(self):
root = GridLayout(cols=2, padding=50, spacing=50)
root.add_widget(LineEllipse1())
root.add_widget(LineEllipse2())
root.add_widget(LineEllipse3())
root.add_widget(LineCircle1())
root.add_widget(LineCircle2())
root.add_widget(LineCircle3())
root.add_widget(LineCircle4())
root.add_widget(LineRectangle())
root.add_widget(LineBezier())
return root
if __name__ == '__main__':
LineExtendedApp().run()
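# --- Editor's note: a hedged sketch (not part of the original demo) of one
# more widget in the same style, using Line's plain `points` property. To try
# it, add the kv rule to Builder.load_string, define the class, and call
# root.add_widget(LinePoints()) in build(). ---
#
# <LinePoints>:
#     canvas:
#         Color:
#             rgba: 1, 1, .1, .9
#         Line:
#             width: 2.
#             points:
#                 (self.x, self.y, self.center_x, self.top,
#                 self.right, self.y)
#     Label:
#         center: root.center
#         text: 'Points'
#
# class LinePoints(Widget):
#     pass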
|
mit
|
manjunaths/tensorflow
|
tensorflow/contrib/learn/__init__.py
|
8
|
2286
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedClassifier
@@LinearClassifier
@@LinearRegressor
@@LogisticRegressor
## Distributed training utilities
@@Experiment
@@ExportStrategy
@@TaskType
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
## Export utilities
@@build_parsing_serving_input_fn
@@ProblemType
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
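# --- Editor's note: a schematic of what an allowlist-style cleanup like the
# call above does (this is NOT TensorFlow's actual implementation): drop
# public module attributes that are neither documented nor explicitly
# allowed. ---
#   import sys
#   def _prune_module(module_name, allowed):
#       module = sys.modules[module_name]
#       for name in list(vars(module)):
#           if not name.startswith('_') and name not in allowed:
#               delattr(module, name)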
|
apache-2.0
|
tangfeixiong/nova
|
nova/tests/functional/v3/test_multiple_create.py
|
30
|
2240
|
# Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class MultipleCreateJsonTest(test_servers.ServersSampleBase):
extension_name = "os-multiple-create"
_api_version = 'v2'
def _get_flags(self):
f = super(MultipleCreateJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.multiple_create.'
'Multiple_create')
return f
def test_multiple_create(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'min_count': "2",
'max_count': "3"
}
response = self._do_post('servers', 'multiple-create-post-req', subs)
subs.update(self._get_regexes())
self._verify_response('multiple-create-post-resp', subs, response, 202)
def test_multiple_create_without_reservation_id(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'min_count': "2",
'max_count': "3"
}
response = self._do_post('servers', 'multiple-create-no-resv-post-req',
subs)
subs.update(self._get_regexes())
self._verify_response('multiple-create-no-resv-post-resp', subs,
response, 202)
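# --- Editor's note: rough shape of the request body that the
# 'multiple-create-post-req' template expands to with the substitutions
# above (paraphrased, not copied from the sample tree; treat the exact
# field layout as an assumption). ---
#   {
#       "server": {
#           "name": "new-server-test",
#           "imageRef": "<image_id>",
#           "flavorRef": "<flavor ref>",
#           "min_count": "2",
#           "max_count": "3"
#       }
#   }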
|
apache-2.0
|
uber/ludwig
|
ludwig/encoders/sequence_encoders.py
|
1
|
102959
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from abc import ABC
import tensorflow as tf
from tensorflow.keras.layers import Dense
from ludwig.encoders.base import Encoder
from ludwig.utils.registry import Registry, register, register_default
from ludwig.modules.attention_modules import TrasformerStack
from ludwig.modules.convolutional_modules import Conv1DStack, \
ParallelConv1DStack, ParallelConv1D
from ludwig.modules.embedding_modules import EmbedSequence, \
TokenAndPositionEmbedding
from ludwig.modules.fully_connected_modules import FCStack
from ludwig.modules.recurrent_modules import RecurrentStack
from ludwig.modules.reduction_modules import SequenceReducer
logger = logging.getLogger(__name__)
ENCODER_REGISTRY = Registry()
class SequenceEncoder(Encoder, ABC):
@classmethod
def register(cls, name):
ENCODER_REGISTRY[name] = cls
@register_default(name='passthrough')
class SequencePassthroughEncoder(SequenceEncoder):
def __init__(
self,
reduce_output=None,
**kwargs
):
"""
:param reduce_output: defines how to reduce the output tensor along
the `s` sequence length dimension if the rank of the tensor
is greater than 2. Available values are: `sum`,
`mean` or `avg`, `max`, `concat` (concatenates along
the first dimension), `last` (returns the last vector of the
first dimension) and `None` or `null` (which does not reduce
and returns the full tensor).
:type reduce_output: str
"""
super(SequencePassthroughEncoder, self).__init__()
logger.debug(' {}'.format(self.name))
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if self.reduce_output is None:
self.supports_masking = True
def call(
self,
input_sequence,
training=True,
mask=None
):
"""
:param input_sequence: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type input_sequence: Tensor
:param is_training: Tensor (tf.bool) specifying if in training mode
(important for dropout)
:type is_training: Tensor
"""
input_sequence = tf.cast(input_sequence, tf.float32)
while len(input_sequence.shape) < 3:
input_sequence = tf.expand_dims(
input_sequence, -1
)
hidden = self.reduce_sequence(input_sequence)
return {'encoder_output': hidden}
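# --- Editor's note: minimal usage sketch (not part of the original module);
# shapes follow the docstrings above. ---
#   encoder = SequencePassthroughEncoder(reduce_output='sum')
#   outputs = encoder(tf.constant([[1, 2, 3], [4, 5, 0]]))  # [batch, seq]
#   outputs['encoder_output']  # sequence axis reduced away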
@register(name='embed')
class SequenceEmbedEncoder(SequenceEncoder):
def __init__(
self,
vocab,
representation='dense',
embedding_size=256,
embeddings_trainable=True,
pretrained_embeddings=None,
embeddings_on_cpu=False,
weights_initializer=None,
weights_regularizer=None,
dropout=0,
reduce_output='sum',
**kwargs
):
"""
:param should_embed: If True the input sequence is expected
to be made of integers and will be mapped into embeddings
:type should_embed: Boolean
:param vocab: Vocabulary of the input feature to encode
:type vocab: List
:param representation: the possible values are `dense` and `sparse`.
`dense` means the embeddings are initialized randomly,
`sparse` means they are initialized to be one-hot encodings.
:type representation: Str (one of 'dense' or 'sparse')
:param embedding_size: it is the maximum embedding size, the actual
size will be `min(vocabulary_size, embedding_size)`
for `dense` representations and exactly `vocabulary_size`
for the `sparse` encoding, where `vocabulary_size` is
the number of different strings appearing in the training set
in the column the feature is named after (plus 1 for `<UNK>`).
:type embedding_size: Integer
:param embeddings_trainable: If `True` embeddings are trained during
the training process, if `False` embeddings are fixed.
It may be useful when loading pretrained embeddings
for avoiding finetuning them. This parameter has effect only
for `representation` is `dense` as `sparse` one-hot encodings
are not trainable.
:type embeddings_trainable: Boolean
:param pretrained_embeddings: by default `dense` embeddings
are initialized randomly, but this parameter allows to specify
a path to a file containing embeddings in the GloVe format.
When the file containing the embeddings is loaded, only the
embeddings with labels present in the vocabulary are kept,
the others are discarded. If the vocabulary contains strings
that have no match in the embeddings file, their embeddings
are initialized with the average of all other embedding plus
some random noise to make them different from each other.
This parameter has effect only if `representation` is `dense`.
:type pretrained_embeddings: str (filepath)
:param embeddings_on_cpu: by default embedding matrices are stored
on GPU memory if a GPU is used, as it allows
for faster access, but in some cases the embedding matrix
may be really big and this parameter forces the placement
of the embedding matrix in regular memory and the CPU is used
to resolve them, slightly slowing down the process
as a result of data transfer between CPU and GPU memory.
:param dropout: determines if there should be a dropout layer before
returning the encoder output.
:type dropout: Boolean
:param weights_initializer: the initializer to use. If `None`, the default
initialized of each variable is used (`glorot_uniform`
in most cases). Options are: `constant`, `identity`, `zeros`,
`ones`, `orthogonal`, `normal`, `uniform`,
`truncated_normal`, `variance_scaling`, `glorot_normal`,
`glorot_uniform`, `xavier_normal`, `xavier_uniform`,
`he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
Alternatively it is possible to specify a dictionary with
a key `type` that identifies the type of initializer and
other keys for its parameters, e.g.
`{type: normal, mean: 0, stddev: 0}`.
To know the parameters of each initializer, please refer to
TensorFlow's documentation.
:type weights_initializer: str
:param regularize: if `True` the embedding weights are added to
the set of weights that get regularized by a regularization
loss (if the `regularization_lambda` in `training`
is greater than 0).
:type regularize: Boolean
:param reduce_output: defines how to reduce the output tensor along
the `s` sequence length dimension if the rank of the tensor
is greater than 2. Available values are: `sum`,
`mean` or `avg`, `max`, `concat` (concatenates along
the first dimension), `last` (returns the last vector of the
first dimension) and `None` or `null` (which does not reduce
and returns the full tensor).
:type reduce_output: str
:param weights_regularizer: The regularizer to use for the weights
of the encoder.
:type weights_regularizer:
:param dropout: Tensor (tf.float) of the probability of dropout
:type dropout: Tensor
"""
super(SequenceEmbedEncoder, self).__init__()
logger.debug(' {}'.format(self.name))
self.reduce_output = reduce_output
if self.reduce_output is None:
self.supports_masking = True
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
logger.debug(' EmbedSequence')
self.embed_sequence = EmbedSequence(
vocab,
embedding_size,
representation=representation,
embeddings_trainable=embeddings_trainable,
pretrained_embeddings=pretrained_embeddings,
embeddings_on_cpu=embeddings_on_cpu,
dropout=dropout,
embedding_initializer=weights_initializer,
embedding_regularizer=weights_regularizer
)
def call(self, inputs, training=None, mask=None):
"""
:param inputs: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type inputs: Tensor
:param training: specifying if in training mode
(important for dropout)
:type training: Boolean
"""
# ================ Embeddings ================
embedded_sequence = self.embed_sequence(
inputs, training=training, mask=mask
)
hidden = self.reduce_sequence(embedded_sequence)
return {'encoder_output': hidden}
@register(name='parallel_cnn')
class ParallelCNN(SequenceEncoder):
def __init__(
self,
should_embed=True,
vocab=None,
representation='dense',
embedding_size=256,
embeddings_trainable=True,
pretrained_embeddings=None,
embeddings_on_cpu=False,
conv_layers=None,
num_conv_layers=None,
filter_size=3,
num_filters=256,
pool_function='max',
pool_size=None,
fc_layers=None,
num_fc_layers=None,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
activation='relu',
dropout=0,
reduce_output='max',
**kwargs):
"""
:param should_embed: If True the input sequence is expected
to be made of integers and will be mapped into embeddings
:type should_embed: Boolean
:param vocab: Vocabulary of the input feature to encode
:type vocab: List
:param representation: the possible values are `dense` and `sparse`.
`dense` means the embeddings are initialized randomly,
`sparse` means they are initialized to be one-hot encodings.
:type representation: Str (one of 'dense' or 'sparse')
:param embedding_size: it is the maximum embedding size, the actual
size will be `min(vocabulary_size, embedding_size)`
for `dense` representations and exactly `vocabulary_size`
for the `sparse` encoding, where `vocabulary_size` is
the number of different strings appearing in the training set
in the column the feature is named after (plus 1 for `<UNK>`).
:type embedding_size: Integer
:param embeddings_trainable: If `True` embeddings are trained during
the training process, if `False` embeddings are fixed.
It may be useful when loading pretrained embeddings
for avoiding finetuning them. This parameter has effect only
for `representation` is `dense` as `sparse` one-hot encodings
are not trainable.
:type embeddings_trainable: Boolean
:param pretrained_embeddings: by default `dense` embeddings
are initialized randomly, but this parameter allows to specify
a path to a file containing embeddings in the GloVe format.
When the file containing the embeddings is loaded, only the
embeddings with labels present in the vocabulary are kept,
the others are discarded. If the vocabulary contains strings
that have no match in the embeddings file, their embeddings
are initialized with the average of all other embedding plus
some random noise to make them different from each other.
This parameter has effect only if `representation` is `dense`.
:type pretrained_embeddings: str (filepath)
:param embeddings_on_cpu: by default embedding matrices are stored
on GPU memory if a GPU is used, as it allows
for faster access, but in some cases the embedding matrix
may be really big and this parameter forces the placement
of the embedding matrix in regular memory and the CPU is used
to resolve them, slightly slowing down the process
as a result of data transfer between CPU and GPU memory.
:param conv_layers: it is a list of dictionaries containing
the parameters of all the convolutional layers. The length
of the list determines the number of parallel convolutional
layers and the content of each dictionary determines
the parameters for a specific layer. The available parameters
for each layer are: `filter_size`, `num_filters`, `pool`,
`norm`, `activation` and `regularize`. If any of those values
is missing from the dictionary, the default one specified
as a parameter of the encoder will be used instead. If both
`conv_layers` and `num_conv_layers` are `None`, a default
list will be assigned to `conv_layers` with the value
`[{filter_size: 2}, {filter_size: 3}, {filter_size: 4},
{filter_size: 5}]`.
:type conv_layers: List
:param num_conv_layers: if `conv_layers` is `None`, this is
the number of parallel convolutional layers.
:type num_conv_layers: Integer
:param filter_size: if a `filter_size` is not already specified in
`conv_layers` this is the default `filter_size` that
will be used for each layer. It indicates how wide is
the 1d convolutional filter.
:type filter_size: Integer
:param num_filters: if a `num_filters` is not already specified in
`conv_layers` this is the default `num_filters` that
will be used for each layer. It indicates the number
of filters, and by consequence the output channels of
the 1d convolution.
:type num_filters: Integer
:param pool_size: if a `pool_size` is not already specified
in `conv_layers` this is the default `pool_size` that
will be used for each layer. It indicates the size of
the max pooling that will be performed along the `s` sequence
dimension after the convolution operation.
:type pool_size: Integer
:param fc_layers: it is a list of dictionaries containing
the parameters of all the fully connected layers. The length
of the list determines the number of stacked fully connected
layers and the content of each dictionary determines
the parameters for a specific layer. The available parameters
for each layer are: `fc_size`, `norm`, `activation` and
`regularize`. If any of those values is missing from
the dictionary, the default one specified as a parameter of
the encoder will be used instead. If both `fc_layers` and
`num_fc_layers` are `None`, a default list will be assigned
to `fc_layers` with the value
`[{fc_size: 512}, {fc_size: 256}]`
(only applies if `reduce_output` is not `None`).
:type fc_layers: List
:param num_fc_layers: if `fc_layers` is `None`, this is the number
of stacked fully connected layers (only applies if
`reduce_output` is not `None`).
:type num_fc_layers: Integer
:param fc_size: if a `fc_size` is not already specified in
`fc_layers` this is the default `fc_size` that will be used
for each layer. It indicates the size of the output
of a fully connected layer.
:type fc_size: Integer
:param norm: if a `norm` is not already specified in `conv_layers`
or `fc_layers` this is the default `norm` that will be used
for each layer. It indicates the norm of the output.
:type norm: str
:param activation: Default activation function to use
:type activation: Str
:param dropout: determines if there should be a dropout layer before
returning the encoder output.
:type dropout: Boolean
:param initializer: the initializer to use. If `None` it uses
`glorot_uniform`. Options are: `constant`, `identity`,
`zeros`, `ones`, `orthogonal`, `normal`, `uniform`,
`truncated_normal`, `variance_scaling`, `glorot_normal`,
`glorot_uniform`, `xavier_normal`, `xavier_uniform`,
`he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
Alternatively it is possible to specify a dictionary with
a key `type` that identifies the type of initializer and
other keys for its parameters,
e.g. `{type: normal, mean: 0, stddev: 0}`.
To know the parameters of each initializer, please refer
to TensorFlow's documentation.
:type initializer: str
:param regularize: if a `regularize` is not already specified in
`conv_layers` or `fc_layers` this is the default `regularize`
that will be used for each layer. It indicates if
the layer weights should be considered when computing
a regularization loss.
:type regularize:
:param reduce_output: defines how to reduce the output tensor of
the convolutional layers along the `s` sequence length
dimension if the rank of the tensor is greater than 2.
Available values are: `sum`, `mean` or `avg`, `max`, `concat`
(concatenates along the first dimension), `last` (returns
the last vector of the first dimension) and `None` or `null`
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
super(ParallelCNN, self).__init__()
logger.debug(' {}'.format(self.name))
if conv_layers is not None and num_conv_layers is None:
# use custom-defined layers
self.conv_layers = conv_layers
self.num_conv_layers = len(conv_layers)
elif conv_layers is None and num_conv_layers is not None:
# generate num_conv_layers with default parameters
self.conv_layers = None
self.num_conv_layers = num_conv_layers
elif conv_layers is None and num_conv_layers is None:
# use default layers with varying filter sizes
self.conv_layers = [
{'filter_size': 2},
{'filter_size': 3},
{'filter_size': 4},
{'filter_size': 5}
]
self.num_conv_layers = 4
else:
raise ValueError(
'Invalid layer parametrization, use either conv_layers or'
' num_conv_layers'
)
# The user is expected to provide fc_layers or num_fc_layers
# The following logic handles the case where the user either provides
# both or neither.
if fc_layers is None and num_fc_layers is None:
# use default layers with varying filter sizes
fc_layers = [
{'fc_size': 512},
{'fc_size': 256}
]
num_fc_layers = 2
elif fc_layers is not None and num_fc_layers is not None:
raise ValueError(
'Invalid layer parametrization, use either fc_layers or '
'num_fc_layers only. Not both.'
)
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.should_embed = should_embed
self.embed_sequence = None
if self.should_embed:
logger.debug(' EmbedSequence')
self.embed_sequence = EmbedSequence(
vocab,
embedding_size,
representation=representation,
embeddings_trainable=embeddings_trainable,
pretrained_embeddings=pretrained_embeddings,
embeddings_on_cpu=embeddings_on_cpu,
dropout=dropout,
embedding_initializer=weights_initializer,
embedding_regularizer=weights_regularizer
)
logger.debug(' ParallelConv1D')
self.parallel_conv1d = ParallelConv1D(
layers=self.conv_layers,
default_num_filters=num_filters,
default_filter_size=filter_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=None,
# default_bias_constraint=None,
default_norm=norm,
default_norm_params=norm_params,
default_activation=activation,
default_dropout=dropout,
default_pool_function=pool_function,
default_pool_size=pool_size,
default_pool_padding='same',
)
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=activation,
default_dropout=dropout,
)
def call(self, inputs, training=None, mask=None):
"""
:param inputs: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type inputs: Tensor
:param training: bool specifying if in training mode (important for dropout)
:type training: bool
"""
# ================ Embeddings ================
if self.should_embed:
embedded_sequence = self.embed_sequence(
inputs, training=training, mask=mask
)
else:
embedded_sequence = inputs
while len(embedded_sequence.shape) < 3:
embedded_sequence = tf.expand_dims(embedded_sequence, -1)
# shape=(?, sequence_length, embedding_size)
hidden = embedded_sequence
# ================ Conv Layers ================
hidden = self.parallel_conv1d(
hidden,
training=training,
mask=mask
)
# ================ Sequence Reduction ================
if self.reduce_output is not None:
hidden = self.reduce_sequence(hidden)
# ================ FC Layers ================
hidden = self.fc_stack(
hidden,
training=training,
mask=mask
)
return {'encoder_output': hidden}
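# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Assuming
# the imports at the top of this file (tf, etc.) are available and that
# SequenceEncoder behaves like a Keras layer, a ParallelCNN encoder could
# be exercised roughly like this; the vocabulary and tensor values below
# are made up for illustration:
#
#     encoder = ParallelCNN(
#         vocab=['<PAD>', '<UNK>', 'hello', 'world'],
#         embedding_size=32,
#         num_filters=16,
#         reduce_output='max'
#     )
#     token_ids = tf.constant([[2, 3, 0, 0]], dtype=tf.int32)  # [batch, seq]
#     hidden = encoder(token_ids, training=False)['encoder_output']
# ----------------------------------------------------------------------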
@register(name='stacked_cnn')
class StackedCNN(SequenceEncoder):
def __init__(
self,
should_embed=True,
vocab=None,
representation='dense',
embedding_size=256,
embeddings_trainable=True,
pretrained_embeddings=None,
embeddings_on_cpu=False,
conv_layers=None,
num_conv_layers=None,
num_filters=256,
filter_size=5,
strides=1,
padding='same',
dilation_rate=1,
pool_function='max',
pool_size=None,
pool_strides=None,
pool_padding='same',
fc_layers=None,
num_fc_layers=None,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
activation='relu',
dropout=0,
reduce_output='max',
**kwargs
):
"""
:param should_embed: If True the input sequence is expected
to be made of integers and will be mapped into embeddings
:type should_embed: Boolean
:param vocab: Vocabulary of the input feature to encode
:type vocab: List
:param representation: the possible values are `dense` and `sparse`.
`dense` means the embeddings are initialized randomly,
`sparse` means they are initialized to be one-hot encodings.
:type representation: Str (one of 'dense' or 'sparse')
:param embedding_size: it is the maximum embedding size, the actual
size will be `min(vocabulary_size, embedding_size)`
for `dense` representations and exactly `vocabulary_size`
for the `sparse` encoding, where `vocabulary_size` is
the number of different strings appearing in the training set
in the column the feature is named after (plus 1 for `<UNK>`).
:type embedding_size: Integer
:param embeddings_trainable: If `True` embeddings are trained during
the training process, if `False` embeddings are fixed.
It may be useful when loading pretrained embeddings
for avoiding finetuning them. This parameter has effect only
for `representation` is `dense` as `sparse` one-hot encodings
are not trainable.
:type embeddings_trainable: Boolean
:param pretrained_embeddings: by default `dense` embeddings
are initialized randomly, but this parameter allows to specify
a path to a file containing embeddings in the GloVe format.
When the file containing the embeddings is loaded, only the
embeddings with labels present in the vocabulary are kept,
the others are discarded. If the vocabulary contains strings
that have no match in the embeddings file, their embeddings
are initialized with the average of all other embedding plus
some random noise to make them different from each other.
This parameter has effect only if `representation` is `dense`.
:type pretrained_embeddings: str (filepath)
:param embeddings_on_cpu: by default embedding matrices are stored
on GPU memory if a GPU is used, as it allows
for faster access, but in some cases the embedding matrix
may be really big and this parameter forces the placement
of the embedding matrix in regular memory and the CPU is used
to resolve them, slightly slowing down the process
as a result of data transfer between CPU and GPU memory.
:param conv_layers: it is a list of dictionaries containing
the parameters of all the convolutional layers. The length
of the list determines the number of stacked convolutional
layers and the content of each dictionary determines
the parameters for a specific layer. The available parameters
for each layer are: `filter_size`, `num_filters`, `pool`,
`norm`, `activation` and `regularize`. If any of those values
is missing from the dictionary, the default one specified
as a parameter of the encoder will be used instead. If both
`conv_layers` and `num_conv_layers` are `None`, a default
stack of six layers will be assigned to `conv_layers`
(filter sizes 7, 7, 3, 3, 3, 3, with max pooling of size 3
after the first, second and last layer).
:type conv_layers: List
:param num_conv_layers: if `conv_layers` is `None`, this is
the number of stacked convolutional layers.
:type num_conv_layers: Integer
:param filter_size: if a `filter_size` is not already specified in
`conv_layers` this is the default `filter_size` that
will be used for each layer. It indicates how wide is
the 1d convolutional filter.
:type filter_size: Integer
:param num_filters: if a `num_filters` is not already specified in
`conv_layers` this is the default `num_filters` that
will be used for each layer. It indicates the number
of filters, and by consequence the output channels of
the 1d convolution.
:type num_filters: Integer
:param pool_size: if a `pool_size` is not already specified
in `conv_layers` this is the default `pool_size` that
will be used for each layer. It indicates the size of
the max pooling that will be performed along the `s` sequence
dimension after the convolution operation.
:type pool_size: Integer
:param fc_layers: it is a list of dictionaries containing
the parameters of all the fully connected layers. The length
of the list determines the number of stacked fully connected
layers and the content of each dictionary determines
the parameters for a specific layer. The available parameters
for each layer are: `fc_size`, `norm`, `activation` and
`regularize`. If any of those values is missing from
the dictionary, the default one specified as a parameter of
the encoder will be used instead. If both `fc_layers` and
`num_fc_layers` are `None`, a default list will be assigned
to `fc_layers` with the value
`[{fc_size: 512}, {fc_size: 256}]`
(only applies if `reduce_output` is not `None`).
:type fc_layers: List
:param num_fc_layers: if `fc_layers` is `None`, this is the number
of stacked fully connected layers (only applies if
`reduce_output` is not `None`).
:type num_fc_layers: Integer
:param fc_size: if a `fc_size` is not already specified in
`fc_layers` this is the default `fc_size` that will be used
for each layer. It indicates the size of the output
of a fully connected layer.
:type fc_size: Integer
:param norm: if a `norm` is not already specified in `conv_layers`
or `fc_layers` this is the default `norm` that will be used
for each layer. It indicates the norm of the output.
:type norm: str
:param activation: Default activation function to use
:type activation: Str
:param dropout: dropout rate (a float in [0, 1]; 0 disables dropout).
:type dropout: float
:param initializer: the initializer to use. If `None` it uses
`glorot_uniform`. Options are: `constant`, `identity`,
`zeros`, `ones`, `orthogonal`, `normal`, `uniform`,
`truncated_normal`, `variance_scaling`, `glorot_normal`,
`glorot_uniform`, `xavier_normal`, `xavier_uniform`,
`he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
Alternatively it is possible to specify a dictionary with
a key `type` that identifies the type of initializer and
other keys for its parameters,
e.g. `{type: normal, mean: 0, stddev: 0}`.
To know the parameters of each initializer, please refer
to TensorFlow's documentation.
:type initializer: str
:param regularize: if a `regularize` is not already specified in
`conv_layers` or `fc_layers` this is the default `regularize`
that will be used for each layer. It indicates if
the layer weights should be considered when computing
a regularization loss.
:type regularize: Boolean
:param reduce_output: defines how to reduce the output tensor of
the convolutional layers along the `s` sequence length
dimension if the rank of the tensor is greater than 2.
Available values are: `sum`, `mean` or `avg`, `max`, `concat`
(concatenates along the first dimension), `last` (returns
the last vector of the first dimension) and `None` or `null`
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
super(StackedCNN, self).__init__()
logger.debug(' {}'.format(self.name))
if conv_layers is not None and num_conv_layers is None:
# use custom-defined layers
self.conv_layers = conv_layers
self.num_conv_layers = len(conv_layers)
elif conv_layers is None and num_conv_layers is not None:
# generate num_conv_layers with default parameters
self.conv_layers = None
self.num_conv_layers = num_conv_layers
elif conv_layers is None and num_conv_layers is None:
# use default layers with varying filter sizes
self.conv_layers = [
{
'filter_size': 7,
'pool_size': 3,
'regularize': False
},
{
'filter_size': 7,
'pool_size': 3,
'regularize': False
},
{
'filter_size': 3,
'pool_size': None,
'regularize': False
},
{
'filter_size': 3,
'pool_size': None,
'regularize': False
},
{
'filter_size': 3,
'pool_size': None,
'regularize': True
},
{
'filter_size': 3,
'pool_size': 3,
'regularize': True
}
]
self.num_conv_layers = 6
else:
raise ValueError(
'Invalid layer parametrization, use either conv_layers or '
'num_conv_layers'
)
# The user is expected to provide fc_layers or num_fc_layers
# The following logic handles the case where the user either provides
# both or neither.
if fc_layers is None and num_fc_layers is None:
# use default fully connected layers
fc_layers = [
{'fc_size': 512},
{'fc_size': 256}
]
num_fc_layers = 2
elif fc_layers is not None and num_fc_layers is not None:
raise ValueError(
'Invalid layer parametrization, use either fc_layers or '
'num_fc_layers only. Not both.'
)
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.should_embed = should_embed
self.embed_sequence = None
if self.should_embed:
logger.debug(' EmbedSequence')
self.embed_sequence = EmbedSequence(
vocab,
embedding_size,
representation=representation,
embeddings_trainable=embeddings_trainable,
pretrained_embeddings=pretrained_embeddings,
embeddings_on_cpu=embeddings_on_cpu,
dropout=dropout,
embedding_initializer=weights_initializer,
embedding_regularizer=weights_regularizer
)
logger.debug(' Conv1DStack')
self.conv1d_stack = Conv1DStack(
layers=self.conv_layers,
default_num_filters=num_filters,
default_filter_size=filter_size,
default_strides=strides,
default_padding=padding,
default_dilation_rate=dilation_rate,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=None,
# default_bias_constraint=None,
default_norm=norm,
default_norm_params=norm_params,
default_activation=activation,
default_dropout=dropout,
default_pool_function=pool_function,
default_pool_size=pool_size,
default_pool_strides=pool_strides,
default_pool_padding=pool_padding,
)
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=activation,
default_dropout=dropout,
)
def call(self, inputs, training=None, mask=None):
"""
:param inputs: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type inputs: Tensor
:param training: bool specifying if in training mode (important for dropout)
:type training: bool
"""
# ================ Embeddings ================
if self.should_embed:
embedded_sequence = self.embed_sequence(
inputs, training=training, mask=mask
)
else:
embedded_sequence = inputs
while len(embedded_sequence.shape) < 3:
embedded_sequence = tf.expand_dims(embedded_sequence, -1)
# shape=(?, sequence_length, embedding_size)
hidden = embedded_sequence
# ================ Conv Layers ================
hidden = self.conv1d_stack(
hidden,
training=training,
mask=mask
)
# ================ Sequence Reduction ================
if self.reduce_output is not None:
hidden = self.reduce_sequence(hidden)
# ================ FC Layers ================
hidden = self.fc_stack(
hidden,
training=training,
mask=mask
)
return {'encoder_output': hidden}
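# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a StackedCNN
# configured with two custom convolutional layers; per-layer dicts override
# the encoder-level defaults, as described in the constructor docstring.
# Values below are made up for illustration:
#
#     encoder = StackedCNN(
#         vocab=['<PAD>', '<UNK>', 'a', 'b'],
#         embedding_size=64,
#         conv_layers=[
#             {'filter_size': 7, 'pool_size': 3},
#             {'filter_size': 3, 'pool_size': None}
#         ],
#         reduce_output='max'
#     )
#     outputs = encoder(tf.constant([[2, 3, 2, 0]], dtype=tf.int32),
#                       training=False)
#     hidden = outputs['encoder_output']
# ----------------------------------------------------------------------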
@register(name='stacked_parallel_cnn')
class StackedParallelCNN(SequenceEncoder):
def __init__(
self,
should_embed=True,
vocab=None,
representation='dense',
embedding_size=256,
embeddings_trainable=True,
pretrained_embeddings=None,
embeddings_on_cpu=False,
stacked_layers=None,
num_stacked_layers=None,
filter_size=3,
num_filters=256,
pool_function='max',
pool_size=None,
fc_layers=None,
num_fc_layers=None,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
activation='relu',
dropout=0,
reduce_output='max',
**kwargs
):
"""
:param should_embed: If True the input sequence is expected
to be made of integers and will be mapped into embeddings
:type should_embed: Boolean
:param vocab: Vocabulary of the input feature to encode
:type vocab: List
:param representation: the possible values are `dense` and `sparse`.
`dense` means the embeddings are initialized randomly,
`sparse` means they are initialized to be one-hot encodings.
:type representation: Str (one of 'dense' or 'sparse')
:param embedding_size: it is the maximum embedding size, the actual
size will be `min(vocabulary_size, embedding_size)`
for `dense` representations and exactly `vocabulary_size`
for the `sparse` encoding, where `vocabulary_size` is
the number of different strings appearing in the training set
in the column the feature is named after (plus 1 for `<UNK>`).
:type embedding_size: Integer
:param embeddings_trainable: If `True` embeddings are trained during
the training process, if `False` embeddings are fixed.
It may be useful when loading pretrained embeddings
for avoiding finetuning them. This parameter has effect only
for `representation` is `dense` as `sparse` one-hot encodings
are not trainable.
:type embeddings_trainable: Boolean
:param pretrained_embeddings: by default `dense` embeddings
are initialized randomly, but this parameter allows to specify
a path to a file containing embeddings in the GloVe format.
When the file containing the embeddings is loaded, only the
embeddings with labels present in the vocabulary are kept,
the others are discarded. If the vocabulary contains strings
that have no match in the embeddings file, their embeddings
are initialized with the average of all other embedding plus
some random noise to make them different from each other.
This parameter has effect only if `representation` is `dense`.
:type pretrained_embeddings: str (filepath)
:param embeddings_on_cpu: by default embedding matrices are stored
on GPU memory if a GPU is used, as it allows
for faster access, but in some cases the embedding matrix
may be really big and this parameter forces the placement
of the embedding matrix in regular memory and the CPU is used
to resolve them, slightly slowing down the process
as a result of data transfer between CPU and GPU memory.
:param stacked_layers: it is a list of lists of dictionaries
containing the parameters of the stack of
parallel convolutional layers. The length of the list
determines the number of stacked parallel
convolutional layers, length of the sub-lists determines
the number of parallel conv layers and the content
of each dictionary determines the parameters for
a specific layer. The available parameters for each layer are:
`filter_size`, `num_filters`, `pool_size`, `norm`,
`activation` and `regularize`. If any of those values
is missing from the dictionary, the default one specified
as a parameter of the encoder will be used instead. If both
`stacked_layers` and `num_stacked_layers` are `None`,
a default list will be assigned to `stacked_layers` with
the value `[[{filter_size: 2}, {filter_size: 3},
{filter_size: 4}, {filter_size: 5}], [{filter_size: 2},
{filter_size: 3}, {filter_size: 4}, {filter_size: 5}],
[{filter_size: 2}, {filter_size: 3}, {filter_size: 4},
{filter_size: 5}]]`.
:type stacked_layers: List
:param num_stacked_layers: if `stacked_layers` is `None`, this is
the number of elements in the stack of
parallel convolutional layers.
:type num_stacked_layers: Integer
:param filter_size: if a `filter_size` is not already specified in
`conv_layers` this is the default `filter_size` that
will be used for each layer. It indicates how wide is
the 1d convolutional filter.
:type filter_size: Integer
:param num_filters: if a `num_filters` is not already specified in
`conv_layers` this is the default `num_filters` that
will be used for each layer. It indicates the number
of filters, and by consequence the output channels of
the 1d convolution.
:type num_filters: Integer
:param pool_size: if a `pool_size` is not already specified
in `conv_layers` this is the default `pool_size` that
will be used for each layer. It indicates the size of
the max pooling that will be performed along the `s` sequence
dimension after the convolution operation.
:type pool_size: Integer
:param fc_layers: it is a list of dictionaries containing
the parameters of all the fully connected layers. The length
of the list determines the number of stacked fully connected
layers and the content of each dictionary determines
the parameters for a specific layer. The available parameters
for each layer are: `fc_size`, `norm`, `activation` and
`regularize`. If any of those values is missing from
the dictionary, the default one specified as a parameter of
the encoder will be used instead. If both `fc_layers` and
`num_fc_layers` are `None`, a default list will be assigned
to `fc_layers` with the value
`[{fc_size: 512}, {fc_size: 256}]`
(only applies if `reduce_output` is not `None`).
:type fc_layers: List
:param num_fc_layers: if `fc_layers` is `None`, this is the number
of stacked fully connected layers (only applies if
`reduce_output` is not `None`).
:type num_fc_layers: Integer
:param fc_size: if a `fc_size` is not already specified in
`fc_layers` this is the default `fc_size` that will be used
for each layer. It indicates the size of the output
of a fully connected layer.
:type fc_size: Integer
:param norm: if a `norm` is not already specified in `conv_layers`
or `fc_layers` this is the default `norm` that will be used
for each layer. It indicates the norm of the output.
:type norm: str
:param activation: Default activation function to use
:type activation: Str
:param dropout: dropout rate (a float in [0, 1]; 0 disables dropout).
:type dropout: float
:param initializer: the initializer to use. If `None` it uses
`glorot_uniform`. Options are: `constant`, `identity`,
`zeros`, `ones`, `orthogonal`, `normal`, `uniform`,
`truncated_normal`, `variance_scaling`, `glorot_normal`,
`glorot_uniform`, `xavier_normal`, `xavier_uniform`,
`he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
Alternatively it is possible to specify a dictionary with
a key `type` that identifies the type of initializer and
other keys for its parameters,
e.g. `{type: normal, mean: 0, stddev: 0}`.
To know the parameters of each initializer, please refer
to TensorFlow's documentation.
:type initializer: str
:param regularize: if a `regularize` is not already specified in
`conv_layers` or `fc_layers` this is the default `regularize`
that will be used for each layer. It indicates if
the layer weights should be considered when computing
a regularization loss.
:type regularize: Boolean
:param reduce_output: defines how to reduce the output tensor of
the convolutional layers along the `s` sequence length
dimension if the rank of the tensor is greater than 2.
Available values are: `sum`, `mean` or `avg`, `max`, `concat`
(concatenates along the first dimension), `last` (returns
the last vector of the first dimension) and `None` or `null`
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
super(StackedParallelCNN, self).__init__()
logger.debug(' {}'.format(self.name))
if stacked_layers is not None and num_stacked_layers is None:
# use custom-defined layers
self.stacked_layers = stacked_layers
self.num_stacked_layers = len(stacked_layers)
elif stacked_layers is None and num_stacked_layers is not None:
# generate num_stacked_layers with default parameters
self.stacked_layers = None
self.num_stacked_layers = num_stacked_layers
elif stacked_layers is None and num_stacked_layers is None:
# use default layers with varying filter sizes
self.stacked_layers = [
[
{'filter_size': 2},
{'filter_size': 3},
{'filter_size': 4},
{'filter_size': 5}
],
[
{'filter_size': 2},
{'filter_size': 3},
{'filter_size': 4},
{'filter_size': 5}
],
[
{'filter_size': 2},
{'filter_size': 3},
{'filter_size': 4},
{'filter_size': 5}
]
]
self.num_stacked_layers = 3
else:
raise ValueError(
'Invalid layer parametrization, use either stacked_layers or'
' num_stacked_layers'
)
# The user is expected to provide fc_layers or num_fc_layers
# The following logic handles the case where the user either provides
# both or neither.
if fc_layers is None and num_fc_layers is None:
# use default fully connected layers
fc_layers = [
{'fc_size': 512},
{'fc_size': 256}
]
num_fc_layers = 2
elif fc_layers is not None and num_fc_layers is not None:
raise ValueError(
'Invalid layer parametrization, use either fc_layers or '
'num_fc_layers only. Not both.'
)
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.should_embed = should_embed
self.embed_sequence = None
if self.should_embed:
logger.debug(' EmbedSequence')
self.embed_sequence = EmbedSequence(
vocab,
embedding_size,
representation=representation,
embeddings_trainable=embeddings_trainable,
pretrained_embeddings=pretrained_embeddings,
embeddings_on_cpu=embeddings_on_cpu,
dropout=dropout,
embedding_initializer=weights_initializer,
embedding_regularizer=weights_regularizer
)
logger.debug(' ParallelConv1DStack')
self.parallel_conv1d_stack = ParallelConv1DStack(
stacked_layers=self.stacked_layers,
default_num_filters=num_filters,
default_filter_size=filter_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=activation,
default_dropout=dropout,
default_pool_function=pool_function,
default_pool_size=pool_size,
)
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=activation,
default_dropout=dropout,
)
def call(self, inputs, training=None, mask=None):
"""
:param inputs: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type inputs: Tensor
:param training: bool specifying if in training mode (important for dropout)
:type training: bool
"""
# ================ Embeddings ================
if self.should_embed:
embedded_sequence = self.embed_sequence(
inputs, training=training, mask=mask
)
else:
embedded_sequence = inputs
while len(embedded_sequence.shape) < 3:
embedded_sequence = tf.expand_dims(embedded_sequence, -1)
# shape=(?, sequence_length, embedding_size)
hidden = embedded_sequence
# ================ Conv Layers ================
hidden = self.parallel_conv1d_stack(
hidden,
training=training,
mask=mask
)
# ================ Sequence Reduction ================
if self.reduce_output is not None:
hidden = self.reduce_sequence(hidden)
# ================ FC Layers ================
hidden = self.fc_stack(
hidden,
training=training,
mask=mask
)
return {'encoder_output': hidden}
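# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): in a
# StackedParallelCNN, `stacked_layers` is a list of lists, one inner list
# per stacked block of parallel convolutional layers. Values below are
# made up for illustration:
#
#     encoder = StackedParallelCNN(
#         vocab=['<PAD>', '<UNK>', 'a', 'b'],
#         embedding_size=32,
#         stacked_layers=[
#             [{'filter_size': 2}, {'filter_size': 3}],
#             [{'filter_size': 2}, {'filter_size': 3}]
#         ],
#         reduce_output='max'
#     )
#     hidden = encoder(tf.constant([[2, 3, 0]], dtype=tf.int32),
#                      training=False)['encoder_output']
# ----------------------------------------------------------------------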
@register(name='rnn')
class StackedRNN(SequenceEncoder):
def __init__(
self,
should_embed=True,
vocab=None,
representation='dense',
embedding_size=256,
embeddings_trainable=True,
pretrained_embeddings=None,
embeddings_on_cpu=False,
num_layers=1,
state_size=256,
cell_type='rnn',
bidirectional=False,
activation='tanh',
recurrent_activation='sigmoid',
unit_forget_bias=True,
recurrent_initializer='orthogonal',
recurrent_regularizer=None,
# recurrent_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
fc_layers=None,
num_fc_layers=0,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
fc_activation='relu',
fc_dropout=0,
reduce_output='last',
**kwargs
):
"""
:param should_embed: If True the input sequence is expected
to be made of integers and will be mapped into embeddings
:type should_embed: Boolean
:param vocab: Vocabulary of the input feature to encode
:type vocab: List
:param representation: the possible values are `dense` and `sparse`.
`dense` means the embeddings are initialized randomly,
`sparse` means they are initialized to be one-hot encodings.
:type representation: Str (one of 'dense' or 'sparse')
:param embedding_size: it is the maximum embedding size, the actual
size will be `min(vocabulary_size, embedding_size)`
for `dense` representations and exactly `vocabulary_size`
for the `sparse` encoding, where `vocabulary_size` is
the number of different strings appearing in the training set
in the column the feature is named after (plus 1 for `<UNK>`).
:type embedding_size: Integer
:param embeddings_trainable: If `True` embeddings are trained during
the training process, if `False` embeddings are fixed.
It may be useful when loading pretrained embeddings
for avoiding finetuning them. This parameter has effect only
for `representation` is `dense` as `sparse` one-hot encodings
are not trainable.
:type embeddings_trainable: Boolean
:param pretrained_embeddings: by default `dense` embeddings
are initialized randomly, but this parameter allows to specify
a path to a file containing embeddings in the GloVe format.
When the file containing the embeddings is loaded, only the
embeddings with labels present in the vocabulary are kept,
the others are discarded. If the vocabulary contains strings
that have no match in the embeddings file, their embeddings
are initialized with the average of all other embedding plus
some random noise to make them different from each other.
This parameter has effect only if `representation` is `dense`.
:type pretrained_embeddings: str (filepath)
:param embeddings_on_cpu: by default embedding matrices are stored
on GPU memory if a GPU is used, as it allows
for faster access, but in some cases the embedding matrix
may be really big and this parameter forces the placement
of the embedding matrix in regular memory and the CPU is used
to resolve them, slightly slowing down the process
as a result of data transfer between CPU and GPU memory.
:param conv_layers: it is a list of dictionaries containing
the parameters of all the convolutional layers. The length
of the list determines the number of parallel convolutional
layers and the content of each dictionary determines
the parameters for a specific layer. The available parameters
for each layer are: `filter_size`, `num_filters`, `pool`,
`norm`, `activation` and `regularize`. If any of those values
is missing from the dictionary, the default one specified
as a parameter of the encoder will be used instead. If both
`conv_layers` and `num_conv_layers` are `None`, a default
list will be assigned to `conv_layers` with the value
`[{filter_size: 2}, {filter_size: 3}, {filter_size: 4},
{filter_size: 5}]`.
:type conv_layers: List
:param num_conv_layers: if `conv_layers` is `None`, this is
the number of stacked convolutional layers.
:type num_conv_layers: Integer
:param filter_size: if a `filter_size` is not already specified in
`conv_layers` this is the default `filter_size` that
will be used for each layer. It indicates how wide is
the 1d convolutional filter.
:type filter_size: Integer
:param num_filters: if a `num_filters` is not already specified in
`conv_layers` this is the default `num_filters` that
will be used for each layer. It indicates the number
of filters, and by consequence the output channels of
the 1d convolution.
:type num_filters: Integer
:param pool_size: if a `pool_size` is not already specified
in `conv_layers` this is the default `pool_size` that
will be used for each layer. It indicates the size of
the max pooling that will be performed along the `s` sequence
dimension after the convolution operation.
:type pool_size: Integer
:param num_layers: the number of stacked recurrent layers.
:type num_layers: Integer
:param cell_type: the type of recurrent cell to use.
Available values are: `rnn`, `lstm`, `lstm_block`, `lstm`,
`ln`, `lstm_cudnn`, `gru`, `gru_block`, `gru_cudnn`.
For reference about the differences between the cells please
refer to TensorFlow's documentation. We suggest using the
`block` variants on CPU and the `cudnn` variants on GPU
because of their increased speed.
:type cell_type: str
:param state_size: the size of the state of the rnn.
:type state_size: Integer
:param bidirectional: if `True` two recurrent networks will perform
encoding in the forward and backward direction and
their outputs will be concatenated.
:type bidirectional: Boolean
:param dropout: dropout rate (a float in [0, 1]; 0 disables dropout).
:type dropout: float
:param initializer: the initializer to use. If `None` it uses
`glorot_uniform`. Options are: `constant`, `identity`,
`zeros`, `ones`, `orthogonal`, `normal`, `uniform`,
`truncated_normal`, `variance_scaling`, `glorot_normal`,
`glorot_uniform`, `xavier_normal`, `xavier_uniform`,
`he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
Alternatively it is possible to specify a dictionary with
a key `type` that identifies the type of initializer and
other keys for its parameters,
e.g. `{type: normal, mean: 0, stddev: 0}`.
To know the parameters of each initializer, please refer
to TensorFlow's documentation.
:type initializer: str
:param regularize: if a `regularize` is not already specified in
`conv_layers` or `fc_layers` this is the default `regularize`
that will be used for each layer. It indicates if
the layer weights should be considered when computing
a regularization loss.
:type regularize: Boolean
:param reduce_output: defines how to reduce the output tensor of
the convolutional layers along the `s` sequence length
dimension if the rank of the tensor is greater than 2.
Available values are: `sum`, `mean` or `avg`, `max`, `concat`
(concatenates along the first dimension), `last` (returns
the last vector of the first dimension) and `None` or `null`
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
super(StackedRNN, self).__init__()
logger.debug(' {}'.format(self.name))
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if self.reduce_output is None:
self.supports_masking = True
self.should_embed = should_embed
self.embed_sequence = None
if self.should_embed:
logger.debug(' EmbedSequence')
self.embed_sequence = EmbedSequence(
vocab,
embedding_size,
representation=representation,
embeddings_trainable=embeddings_trainable,
pretrained_embeddings=pretrained_embeddings,
embeddings_on_cpu=embeddings_on_cpu,
dropout=fc_dropout,
embedding_initializer=weights_initializer,
embedding_regularizer=weights_regularizer
)
logger.debug(' RecurrentStack')
self.recurrent_stack = RecurrentStack(
state_size=state_size,
cell_type=cell_type,
num_layers=num_layers,
bidirectional=bidirectional,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
unit_forget_bias=unit_forget_bias,
weights_initializer=weights_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
weights_regularizer=weights_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
# kernel_constraint=kernel_constraint,
# recurrent_constraint=recurrent_constraint,
# bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
)
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=fc_activation,
default_dropout=fc_dropout,
)
def call(self, inputs, training=None, mask=None):
"""
:param inputs: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type inputs: Tensor
:param training: bool specifying if in training mode (important for dropout)
:type training: bool
"""
# ================ Embeddings ================
if self.should_embed:
embedded_sequence = self.embed_sequence(
inputs, training=training, mask=mask
)
else:
embedded_sequence = inputs
while len(embedded_sequence.shape) < 3:
embedded_sequence = tf.expand_dims(embedded_sequence, -1)
# shape=(?, sequence_length, embedding_size)
hidden = embedded_sequence
# ================ Recurrent Layers ================
hidden, final_state = self.recurrent_stack(
hidden,
training=training,
mask=mask
)
# ================ Sequence Reduction ================
if self.reduce_output is not None:
hidden = self.reduce_sequence(hidden)
# ================ FC Layers ================
hidden = self.fc_stack(
hidden,
training=training,
mask=mask
)
return {
'encoder_output': hidden,
'encoder_output_state': final_state
}
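# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a
# bidirectional LSTM encoder; note that, unlike the CNN encoders above,
# the returned dict also carries 'encoder_output_state'. Values below are
# made up for illustration:
#
#     encoder = StackedRNN(
#         vocab=['<PAD>', '<UNK>', 'a', 'b'],
#         embedding_size=32,
#         cell_type='lstm',
#         state_size=64,
#         num_layers=2,
#         bidirectional=True,
#         reduce_output='last'
#     )
#     out = encoder(tf.constant([[2, 3, 2]], dtype=tf.int32), training=False)
#     hidden, state = out['encoder_output'], out['encoder_output_state']
# ----------------------------------------------------------------------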
@register(name='cnnrnn')
class StackedCNNRNN(SequenceEncoder):
def __init__(
self,
should_embed=True,
vocab=None,
representation='dense',
embedding_size=256,
embeddings_trainable=True,
pretrained_embeddings=None,
embeddings_on_cpu=False,
conv_layers=None,
num_conv_layers=1,
num_filters=256,
filter_size=5,
strides=1,
padding='same',
dilation_rate=1,
conv_activation='relu',
conv_dropout=0.0,
pool_function='max',
pool_size=2,
pool_strides=None,
pool_padding='same',
num_rec_layers=1,
state_size=256,
cell_type='rnn',
bidirectional=False,
activation='tanh',
recurrent_activation='sigmoid',
unit_forget_bias=True,
recurrent_initializer='orthogonal',
recurrent_regularizer=None,
# recurrent_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
fc_layers=None,
num_fc_layers=0,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
fc_activation='relu',
fc_dropout=0,
reduce_output='last',
**kwargs
):
"""
:param should_embed: If True the input sequence is expected
to be made of integers and will be mapped into embeddings
:type should_embed: Boolean
:param vocab: Vocabulary of the input feature to encode
:type vocab: List
:param representation: the possible values are `dense` and `sparse`.
`dense` means the embeddings are initialized randomly,
`sparse` means they are initialized to be one-hot encodings.
:type representation: Str (one of 'dense' or 'sparse')
:param embedding_size: it is the maximum embedding size, the actual
size will be `min(vocabulary_size, embedding_size)`
for `dense` representations and exactly `vocabulary_size`
for the `sparse` encoding, where `vocabulary_size` is
the number of different strings appearing in the training set
in the column the feature is named after (plus 1 for `<UNK>`).
:type embedding_size: Integer
:param embeddings_trainable: If `True` embeddings are trained during
the training process, if `False` embeddings are fixed.
It may be useful when loading pretrained embeddings
for avoiding finetuning them. This parameter has effect only
for `representation` is `dense` as `sparse` one-hot encodings
are not trainable.
:type embeddings_trainable: Boolean
:param pretrained_embeddings: by default `dense` embeddings
are initialized randomly, but this parameter allows to specify
a path to a file containing embeddings in the GloVe format.
When the file containing the embeddings is loaded, only the
embeddings with labels present in the vocabulary are kept,
the others are discarded. If the vocabulary contains strings
that have no match in the embeddings file, their embeddings
are initialized with the average of all other embedding plus
some random noise to make them different from each other.
This parameter has effect only if `representation` is `dense`.
:type pretrained_embeddings: str (filepath)
:param embeddings_on_cpu: by default embedding matrices are stored
on GPU memory if a GPU is used, as it allows
for faster access, but in some cases the embedding matrix
may be really big and this parameter forces the placement
of the embedding matrix in regular memory and the CPU is used
to resolve them, slightly slowing down the process
as a result of data transfer between CPU and GPU memory.
:param num_rec_layers: the number of stacked recurrent layers.
:type num_rec_layers: Integer
:param cell_type: the type of recurrent cell to use.
Available values are: `rnn`, `lstm`, `lstm_block`, `lstm`,
`ln`, `lstm_cudnn`, `gru`, `gru_block`, `gru_cudnn`.
For reference about the differences between the cells please
refer to TensorFlow's documentation. We suggest using the
`block` variants on CPU and the `cudnn` variants on GPU
because of their increased speed.
:type cell_type: str
:param state_size: the size of the state of the rnn.
:type state_size: Integer
:param bidirectional: if `True` two recurrent networks will perform
encoding in the forward and backward direction and
their outputs will be concatenated.
:type bidirectional: Boolean
:param dropout: dropout rate (a float in [0, 1]; 0 disables dropout).
:type dropout: float
:param initializer: the initializer to use. If `None` it uses
`glorot_uniform`. Options are: `constant`, `identity`,
`zeros`, `ones`, `orthogonal`, `normal`, `uniform`,
`truncated_normal`, `variance_scaling`, `glorot_normal`,
`glorot_uniform`, `xavier_normal`, `xavier_uniform`,
`he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
Alternatively it is possible to specify a dictionary with
a key `type` that identifies the type of initializer and
other keys for its parameters,
e.g. `{type: normal, mean: 0, stddev: 0}`.
To know the parameters of each initializer, please refer
to TensorFlow's documentation.
:type initializer: str
:param regularize: if a `regularize` is not already specified in
`conv_layers` or `fc_layers` this is the default `regularize`
that will be used for each layer. It indicates if
the layer weights should be considered when computing
a regularization loss.
:type regularize:
:param reduce_output: defines how to reduce the output tensor of
the convolutional layers along the `s` sequence length
dimension if the rank of the tensor is greater than 2.
Available values are: `sum`, `mean` or `avg`, `max`, `concat`
(concatenates along the first dimension), `last` (returns
the last vector of the first dimension) and `None` or `null`
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
super(StackedCNNRNN, self).__init__()
logger.debug(' {}'.format(self.name))
if conv_layers is not None and num_conv_layers is None:
# use custom-defined layers
self.conv_layers = conv_layers
self.num_conv_layers = len(conv_layers)
elif conv_layers is None and num_conv_layers is not None:
# generate num_conv_layers with default parameters
self.conv_layers = None
self.num_conv_layers = num_conv_layers
elif conv_layers is None and num_conv_layers is None:
# use default convolutional layers with varying pool sizes
self.conv_layers = [
{'pool_size': 3},
{'pool_size': None}
]
self.num_conv_layers = 2
else:
raise ValueError(
'Invalid layer parametrization, use either conv_layers or '
'num_conv_layers'
)
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.should_embed = should_embed
self.embed_sequence = None
if self.should_embed:
logger.debug(' EmbedSequence')
self.embed_sequence = EmbedSequence(
vocab,
embedding_size,
representation=representation,
embeddings_trainable=embeddings_trainable,
pretrained_embeddings=pretrained_embeddings,
embeddings_on_cpu=embeddings_on_cpu,
dropout=fc_dropout,
embedding_initializer=weights_initializer,
embedding_regularizer=weights_regularizer
)
logger.debug(' Conv1DStack')
self.conv1d_stack = Conv1DStack(
layers=self.conv_layers,
default_num_filters=num_filters,
default_filter_size=filter_size,
default_strides=strides,
default_padding=padding,
default_dilation_rate=dilation_rate,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=None,
# default_bias_constraint=None,
default_norm=norm,
default_norm_params=norm_params,
default_activation=conv_activation,
default_dropout=conv_dropout,
default_pool_function=pool_function,
default_pool_size=pool_size,
default_pool_strides=pool_strides,
default_pool_padding=pool_padding,
)
logger.debug(' RecurrentStack')
self.recurrent_stack = RecurrentStack(
state_size=state_size,
cell_type=cell_type,
num_layers=num_rec_layers,
bidirectional=bidirectional,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
unit_forget_bias=unit_forget_bias,
weights_initializer=weights_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
weights_regularizer=weights_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
# kernel_constraint=kernel_constraint,
# recurrent_constraint=recurrent_constraint,
# bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
)
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=fc_activation,
default_dropout=fc_dropout,
)
def call(self, inputs, training=None, mask=None):
"""
:param inputs: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type inputs: Tensor
:param training: bool specifying if in training mode (important for dropout)
:type training: bool
"""
# ================ Embeddings ================
if self.should_embed:
embedded_sequence = self.embed_sequence(
inputs, training=training, mask=mask
)
else:
embedded_sequence = inputs
while len(embedded_sequence.shape) < 3:
embedded_sequence = tf.expand_dims(embedded_sequence, -1)
# shape=(?, sequence_length, embedding_size)
hidden = embedded_sequence
# ================ Conv Layers ================
hidden = self.conv1d_stack(
hidden,
training=training,
mask=mask
)
# ================ Recurrent Layers ================
hidden, final_state = self.recurrent_stack(
hidden,
training=training
)
# ================ Sequence Reduction ================
if self.reduce_output is not None:
hidden = self.reduce_sequence(hidden)
# ================ FC Layers ================
hidden = self.fc_stack(
hidden,
training=training,
mask=mask
)
return {
'encoder_output': hidden,
'encoder_output_state': final_state
}
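# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): in a
# StackedCNNRNN the convolutions first shorten the sequence (via pooling),
# then the recurrent stack encodes it. Values below are made up for
# illustration:
#
#     encoder = StackedCNNRNN(
#         vocab=['<PAD>', '<UNK>', 'a', 'b'],
#         embedding_size=32,
#         num_conv_layers=1,
#         pool_size=2,
#         num_rec_layers=1,
#         cell_type='gru',
#         state_size=64
#     )
#     out = encoder(tf.constant([[2, 3, 2, 0]], dtype=tf.int32),
#                   training=False)
#     hidden = out['encoder_output']
# ----------------------------------------------------------------------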
@register(name='transformer')
class StackedTransformer(SequenceEncoder):
def __init__(
self,
max_sequence_length,
should_embed=True,
vocab=None,
representation='dense',
embedding_size=256,
embeddings_trainable=True,
pretrained_embeddings=None,
embeddings_on_cpu=False,
num_layers=1,
hidden_size=256,
num_heads=8,
transformer_fc_size=256,
dropout=0.1,
fc_layers=None,
num_fc_layers=0,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
fc_activation='relu',
fc_dropout=0,
reduce_output='last',
**kwargs
):
"""
:param should_embed: If True the input sequence is expected
to be made of integers and will be mapped into embeddings
:type should_embed: Boolean
:param vocab: Vocabulary of the input feature to encode
:type vocab: List
:param representation: the possible values are `dense` and `sparse`.
`dense` means the embeddings are initialized randomly,
`sparse` means they are initialized to be one-hot encodings.
:type representation: Str (one of 'dense' or 'sparse')
:param embedding_size: it is the maximum embedding size, the actual
size will be `min(vocabulary_size, embedding_size)`
for `dense` representations and exactly `vocabulary_size`
for the `sparse` encoding, where `vocabulary_size` is
the number of different strings appearing in the training set
in the column the feature is named after (plus 1 for `<UNK>`).
:type embedding_size: Integer
:param embeddings_trainable: If `True` embeddings are trained during
the training process, if `False` embeddings are fixed.
It may be useful when loading pretrained embeddings
for avoiding finetuning them. This parameter has effect only
for `representation` is `dense` as `sparse` one-hot encodings
are not trainable.
:type embeddings_trainable: Boolean
:param pretrained_embeddings: by default `dense` embeddings
are initialized randomly, but this parameter allows to specify
a path to a file containing embeddings in the GloVe format.
When the file containing the embeddings is loaded, only the
embeddings with labels present in the vocabulary are kept,
the others are discarded. If the vocabulary contains strings
that have no match in the embeddings file, their embeddings
are initialized with the average of all other embedding plus
some random noise to make them different from each other.
This parameter has effect only if `representation` is `dense`.
:type pretrained_embeddings: str (filepath)
:param embeddings_on_cpu: by default embedding matrices are stored
on GPU memory if a GPU is used, as it allows
for faster access, but in some cases the embedding matrix
may be really big and this parameter forces the placement
of the embedding matrix in regular memory and the CPU is used
to resolve them, slightly slowing down the process
as a result of data transfer between CPU and GPU memory.
:param conv_layers: it is a list of dictionaries containing
the parameters of all the convolutional layers. The length
of the list determines the number of parallel convolutional
layers and the content of each dictionary determines
the parameters for a specific layer. The available parameters
for each layer are: `filter_size`, `num_filters`, `pool`,
`norm`, `activation` and `regularize`. If any of those values
is missing from the dictionary, the default one specified
as a parameter of the encoder will be used instead. If both
`conv_layers` and `num_conv_layers` are `None`, a default
list will be assigned to `conv_layers` with the value
`[{filter_size: 2}, {filter_size: 3}, {filter_size: 4},
{filter_size: 5}]`.
:type conv_layers: List
:param num_conv_layers: if `conv_layers` is `None`, this is
the number of stacked convolutional layers.
:type num_conv_layers: Integer
:param filter_size: if a `filter_size` is not already specified in
`conv_layers` this is the default `filter_size` that
will be used for each layer. It indicates how wide is
the 1d convolutional filter.
:type filter_size: Integer
:param num_filters: if a `num_filters` is not already specified in
`conv_layers` this is the default `num_filters` that
will be used for each layer. It indicates the number
of filters, and by consequence the output channels of
the 1d convolution.
:type num_filters: Integer
:param pool_size: if a `pool_size` is not already specified
in `conv_layers` this is the default `pool_size` that
will be used for each layer. It indicates the size of
the max pooling that will be performed along the `s` sequence
dimension after the convolution operation.
:type pool_size: Integer
:param num_rec_layers: the number of stacked recurrent layers.
:type num_rec_layers: Integer
:param cell_type: the type of recurrent cell to use.
Available values are: `rnn`, `lstm`, `lstm_block`, `lstm`,
`ln`, `lstm_cudnn`, `gru`, `gru_block`, `gru_cudnn`.
For reference about the differences between the cells please
refer to TensorFlow's documentation. We suggest using the
`block` variants on CPU and the `cudnn` variants on GPU
because of their increased speed.
:type cell_type: str
:param state_size: the size of the state of the rnn.
:type state_size: Integer
:param bidirectional: if `True` two recurrent networks will perform
encoding in the forward and backward direction and
their outputs will be concatenated.
:type bidirectional: Boolean
:param dropout: dropout rate (a float in [0, 1]; 0 disables dropout).
:type dropout: float
:param initializer: the initializer to use. If `None` it uses
`glorot_uniform`. Options are: `constant`, `identity`,
`zeros`, `ones`, `orthogonal`, `normal`, `uniform`,
`truncated_normal`, `variance_scaling`, `glorot_normal`,
`glorot_uniform`, `xavier_normal`, `xavier_uniform`,
`he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
Alternatively it is possible to specify a dictionary with
a key `type` that identifies the type of initializer and
other keys for its parameters,
e.g. `{type: normal, mean: 0, stddev: 0}`.
To know the parameters of each initializer, please refer
to TensorFlow's documentation.
:type initializer: str
:param regularize: if a `regularize` is not already specified in
`conv_layers` or `fc_layers` this is the default `regularize`
that will be used for each layer. It indicates if
the layer weights should be considered when computing
a regularization loss.
:type regularize: Boolean
:param reduce_output: defines how to reduce the output tensor of
the convolutional layers along the `s` sequence length
dimension if the rank of the tensor is greater than 2.
Available values are: `sum`, `mean` or `avg`, `max`, `concat`
(concatenates along the first dimension), `last` (returns
the last vector of the first dimension) and `None` or `null`
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
super(StackedTransformer, self).__init__()
logger.debug(' {}'.format(self.name))
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if self.reduce_output is None:
self.supports_masking = True
self.should_embed = should_embed
self.should_project = False
self.embed_sequence = None
if self.should_embed:
logger.debug(' EmbedSequence')
self.embed_sequence = TokenAndPositionEmbedding(
max_sequence_length,
vocab,
embedding_size,
representation=representation,
embeddings_trainable=embeddings_trainable,
pretrained_embeddings=pretrained_embeddings,
embeddings_on_cpu=embeddings_on_cpu,
dropout=dropout,
embedding_initializer=weights_initializer,
embedding_regularizer=weights_regularizer
)
if embedding_size != hidden_size:
logger.debug(' project_to_embed_size Dense')
self.project_to_hidden_size = Dense(hidden_size)
self.should_project = True
else:
logger.debug(' project_to_embed_size Dense')
self.project_to_hidden_size = Dense(hidden_size)
self.should_project = True
logger.debug(' TransformerStack')
self.transformer_stack = TrasformerStack(
hidden_size=hidden_size,
num_heads=num_heads,
fc_size=transformer_fc_size,
num_layers=num_layers,
dropout=dropout
)
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=fc_activation,
default_dropout=fc_dropout,
)
def call(self, inputs, training=None, mask=None):
"""
:param inputs: The input sequence fed into the encoder.
Shape: [batch x sequence length], type tf.int32
:type inputs: Tensor
:param training: bool specifying if in training mode (important for dropout)
:type training: bool
"""
# ================ Embeddings ================
if self.should_embed:
embedded_sequence = self.embed_sequence(
inputs, training=training, mask=mask
)
else:
embedded_sequence = inputs
while len(embedded_sequence.shape) < 3:
embedded_sequence = tf.expand_dims(embedded_sequence, -1)
# shape=(?, sequence_length, embedding_size)
if self.should_project:
hidden = self.project_to_hidden_size(embedded_sequence)
else:
hidden = embedded_sequence
# shape=(?, sequence_length, hidden)
# ================ Transformer Layers ================
hidden = self.transformer_stack(
hidden,
training=training,
mask=mask
)
# ================ Sequence Reduction ================
if self.reduce_output is not None:
hidden = self.reduce_sequence(hidden)
# ================ FC Layers ================
hidden = self.fc_stack(
hidden,
training=training,
mask=mask
)
return {'encoder_output': hidden}
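# Editorial sketch, not part of the original encoder: it illustrates what the
# `reduce_output` modes documented above roughly correspond to in plain
# TensorFlow ops. The helper name `reduce_sequence_sketch` is hypothetical;
# the actual behaviour is provided by SequenceReducer.
import tensorflow as tf  # already imported at the top of this module
def reduce_sequence_sketch(hidden, mode):
    # hidden has shape [batch, sequence_length, hidden_size]
    if mode == 'sum':
        return tf.reduce_sum(hidden, axis=1)
    if mode in ('mean', 'avg'):
        return tf.reduce_mean(hidden, axis=1)
    if mode == 'max':
        return tf.reduce_max(hidden, axis=1)
    if mode == 'concat':
        # concatenate the sequence steps into one vector per example
        return tf.reshape(hidden, [tf.shape(hidden)[0], -1])
    if mode == 'last':
        return hidden[:, -1, :]
    return hidden  # None / 'null': keep the full [batch, sequence, hidden] tensor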
|
apache-2.0
|
knifenomad/django
|
tests/utils_tests/test_timezone.py
|
149
|
7857
|
import copy
import datetime
import pickle
import unittest
from django.test import override_settings
from django.utils import timezone
try:
import pytz
except ImportError:
pytz = None
requires_pytz = unittest.skipIf(pytz is None, "this test requires pytz")
if pytz is not None:
CET = pytz.timezone("Europe/Paris")
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
class TimezoneTests(unittest.TestCase):
def test_localtime(self):
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
local_tz = timezone.LocalTimezone()
local_now = timezone.localtime(now, local_tz)
self.assertEqual(local_now.tzinfo, local_tz)
def test_localtime_naive(self):
with self.assertRaises(ValueError):
timezone.localtime(datetime.datetime.now())
def test_localtime_out_of_range(self):
local_tz = timezone.LocalTimezone()
long_ago = datetime.datetime(1900, 1, 1, tzinfo=timezone.utc)
try:
timezone.localtime(long_ago, local_tz)
except (OverflowError, ValueError) as exc:
self.assertIn("install pytz", exc.args[0])
else:
raise unittest.SkipTest("Failed to trigger an OverflowError or ValueError")
def test_now(self):
with override_settings(USE_TZ=True):
self.assertTrue(timezone.is_aware(timezone.now()))
with override_settings(USE_TZ=False):
self.assertTrue(timezone.is_naive(timezone.now()))
def test_override(self):
default = timezone.get_default_timezone()
try:
timezone.activate(ICT)
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_override_decorator(self):
default = timezone.get_default_timezone()
@timezone.override(EAT)
def func_tz_eat():
self.assertIs(EAT, timezone.get_current_timezone())
@timezone.override(None)
def func_tz_none():
self.assertIs(default, timezone.get_current_timezone())
try:
timezone.activate(ICT)
func_tz_eat()
self.assertIs(ICT, timezone.get_current_timezone())
func_tz_none()
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
func_tz_eat()
self.assertIs(default, timezone.get_current_timezone())
func_tz_none()
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_copy(self):
self.assertIsInstance(copy.copy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.copy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_deepcopy(self):
self.assertIsInstance(copy.deepcopy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.deepcopy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_pickling_unpickling(self):
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.UTC())), timezone.UTC)
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.LocalTimezone())), timezone.LocalTimezone)
def test_is_aware(self):
self.assertTrue(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertFalse(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_is_naive(self):
self.assertFalse(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertTrue(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_make_aware(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
with self.assertRaises(ValueError):
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT)
def test_make_naive(self):
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
with self.assertRaises(ValueError):
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT)
@requires_pytz
def test_make_aware2(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 12, 20, 30), CET),
CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)))
with self.assertRaises(ValueError):
timezone.make_aware(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET)
@requires_pytz
def test_make_aware_pytz(self):
self.assertEqual(
timezone.make_naive(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET),
datetime.datetime(2011, 9, 1, 12, 20, 30))
self.assertEqual(
timezone.make_naive(
pytz.timezone("Asia/Bangkok").localize(datetime.datetime(2011, 9, 1, 17, 20, 30)), CET
),
datetime.datetime(2011, 9, 1, 12, 20, 30))
with self.assertRaises(ValueError):
timezone.make_naive(datetime.datetime(2011, 9, 1, 12, 20, 30), CET)
@requires_pytz
def test_make_aware_pytz_ambiguous(self):
# 2:30 happens twice, once before DST ends and once after
ambiguous = datetime.datetime(2015, 10, 25, 2, 30)
with self.assertRaises(pytz.AmbiguousTimeError):
timezone.make_aware(ambiguous, timezone=CET)
std = timezone.make_aware(ambiguous, timezone=CET, is_dst=False)
dst = timezone.make_aware(ambiguous, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
@requires_pytz
def test_make_aware_pytz_non_existent(self):
# 2:30 never happened due to DST
non_existent = datetime.datetime(2015, 3, 29, 2, 30)
with self.assertRaises(pytz.NonExistentTimeError):
timezone.make_aware(non_existent, timezone=CET)
std = timezone.make_aware(non_existent, timezone=CET, is_dst=False)
dst = timezone.make_aware(non_existent, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
# round trip to UTC then back to CET
std = timezone.localtime(timezone.localtime(std, timezone.UTC()), CET)
dst = timezone.localtime(timezone.localtime(dst, timezone.UTC()), CET)
self.assertEqual((std.hour, std.minute), (3, 30))
self.assertEqual((dst.hour, dst.minute), (1, 30))
|
bsd-3-clause
|
kenshay/ImageScript
|
ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/third_party/pyfakefs/pyfakefs/example.py
|
23
|
3559
|
# Copyright 2014 Altera Corporation. All Rights Reserved.
# Author: John McGehee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example module that is tested in :py:class:`pyfakefs.example_test.TestExample`.
This demonstrates the usage of the
:py:class:`pyfakefs.fake_filesystem_unittest.TestCase` base class.
The modules related to file handling are bound to the respective fake modules:
>>> os #doctest: +ELLIPSIS
<fake_filesystem.FakeOsModule object...>
>>> os.path #doctest: +ELLIPSIS
<fake_filesystem.FakePathModule object...>
>>> glob #doctest: +ELLIPSIS
<fake_filesystem_glob.FakeGlobModule object...>
>>> shutil #doctest: +ELLIPSIS
<fake_filesystem_shutil.FakeShutilModule object...>
The `open()` built-in is bound to the fake `open()`:
>>> open #doctest: +ELLIPSIS
<fake_filesystem.FakeFileOpen object...>
In Python 2 the `file()` built-in is also bound to the fake `open()`. `file()`
was eliminated in Python 3.
"""
import os
import glob
import shutil
def create_file(path):
'''Create the specified file and add some content to it. Use the `open()`
    built-in function.
For example, the following file operations occur in the fake file system.
In the real file system, we would not even have permission to write `/test`:
>>> os.path.isdir('/test')
False
>>> os.mkdir('/test')
>>> os.path.isdir('/test')
True
>>> os.path.exists('/test/file.txt')
False
>>> create_file('/test/file.txt')
>>> os.path.exists('/test/file.txt')
True
>>> with open('/test/file.txt') as f:
... f.readlines()
["This is test file '/test/file.txt'.\\n", 'It was created using the open() function.\\n']
'''
with open(path, 'w') as f:
f.write("This is test file '{}'.\n".format(path))
f.write("It was created using the open() function.\n")
def delete_file(path):
'''Delete the specified file.
For example:
>>> os.mkdir('/test')
>>> os.path.exists('/test/file.txt')
False
>>> create_file('/test/file.txt')
>>> os.path.exists('/test/file.txt')
True
>>> delete_file('/test/file.txt')
>>> os.path.exists('/test/file.txt')
False
'''
os.remove(path)
def path_exists(path):
'''Return True if the specified file exists.
For example:
>>> path_exists('/test')
False
>>> os.mkdir('/test')
>>> path_exists('/test')
True
>>>
>>> path_exists('/test/file.txt')
False
>>> create_file('/test/file.txt')
>>> path_exists('/test/file.txt')
True
'''
return os.path.exists(path)
def get_glob(glob_path):
'''Return the list of paths matching the specified glob expression.
For example:
>>> os.mkdir('/test')
>>> create_file('/test/file1.txt')
>>> create_file('/test/file2.txt')
>>> get_glob('/test/file*.txt')
['/test/file1.txt', '/test/file2.txt']
'''
return glob.glob(glob_path)
def rm_tree(path):
'''Delete the specified file hierarchy.'''
shutil.rmtree(path)
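# Editorial sketch, not part of the original example module: a minimal test
# case showing how the functions above are typically exercised with pyfakefs,
# assuming the standard fake_filesystem_unittest.TestCase API. The class name
# ExampleSketchTest is illustrative.
import unittest
from pyfakefs import fake_filesystem_unittest
class ExampleSketchTest(fake_filesystem_unittest.TestCase):
    def setUp(self):
        # Replace the real os, open, shutil and glob with in-memory fakes.
        self.setUpPyfakefs()
    def test_create_and_delete(self):
        os.mkdir('/test')
        create_file('/test/file.txt')
        self.assertTrue(path_exists('/test/file.txt'))
        delete_file('/test/file.txt')
        self.assertFalse(path_exists('/test/file.txt'))
if __name__ == '__main__':
    unittest.main()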
|
gpl-3.0
|
caseyrollins/osf.io
|
addons/gitlab/tests/test_views.py
|
4
|
23654
|
# -*- coding: utf-8 -*-
import httplib as http
import mock
import datetime
import pytest
import unittest
from json import dumps
from nose.tools import * # noqa (PEP8 asserts)
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import ProjectFactory, UserFactory, AuthUserFactory
from github3.repos.branch import Branch
from framework.exceptions import HTTPError
from framework.auth import Auth
from addons.base.tests.views import (
OAuthAddonAuthViewsTestCaseMixin, OAuthAddonConfigViewsTestCaseMixin
)
from addons.gitlab import utils
from addons.gitlab.api import GitLabClient
from addons.gitlab.serializer import GitLabSerializer
from addons.gitlab.utils import check_permissions
from addons.gitlab.tests.utils import create_mock_gitlab, GitLabAddonTestCase
from addons.gitlab.tests.factories import GitLabAccountFactory
pytestmark = pytest.mark.django_db
class TestGitLabAuthViews(GitLabAddonTestCase, OAuthAddonAuthViewsTestCaseMixin, OsfTestCase):
@mock.patch(
'addons.gitlab.models.UserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_delete_external_account(self):
super(TestGitLabAuthViews, self).test_delete_external_account()
def test_oauth_start(self):
pass
def test_oauth_finish(self):
pass
class TestGitLabConfigViews(GitLabAddonTestCase, OAuthAddonConfigViewsTestCaseMixin, OsfTestCase):
folder = None
Serializer = GitLabSerializer
client = GitLabClient
## Overrides ##
def setUp(self):
super(TestGitLabConfigViews, self).setUp()
self.mock_api_user = mock.patch('addons.gitlab.api.GitLabClient.user')
self.mock_api_user.return_value = mock.Mock()
self.mock_api_user.start()
def tearDown(self):
self.mock_api_user.stop()
super(TestGitLabConfigViews, self).tearDown()
def test_folder_list(self):
        # GitLab only lists the root folder (repos), so this test is superfluous
pass
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.views.GitLabClient.repo')
def test_set_config(self, mock_repo, mock_add_hook):
        # GitLab selects repos, not folders, so this needs to be overridden
mock_repo.return_value = 'repo_name'
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.post_json(url, {
'gitlab_user': 'octocat',
'gitlab_repo': 'repo_name',
'gitlab_repo_id': '123',
}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
self.project.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_repo_linked'.format(self.ADDON_SHORT_NAME)
)
mock_add_hook.assert_called_once_with(save=False)
# TODO: Test remaining CRUD methods
# TODO: Test exception handling
class TestCRUD(OsfTestCase):
def setUp(self):
super(TestCRUD, self).setUp()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.node_settings.user_settings = self.project.creator.get_addon('gitlab')
# Set the node addon settings to correspond to the values of the mock repo
self.node_settings.user = self.gitlab.repo.return_value.owner.login
self.node_settings.repo = self.gitlab.repo.return_value.name
self.node_settings.save()
class TestGitLabViews(OsfTestCase):
def setUp(self):
super(TestGitLabViews, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.non_authenticator = UserFactory()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.save()
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.project.creator.external_accounts.add(GitLabAccountFactory())
self.project.creator.save()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.node_settings = self.project.get_addon('gitlab')
self.node_settings.user_settings = self.project.creator.get_addon('gitlab')
# Set the node addon settings to correspond to the values of the mock repo
self.node_settings.user = 'fred'
self.node_settings.repo = 'mock-repo'
self.node_settings.repo_id = 1748448
self.node_settings.save()
def _get_sha_for_branch(self, branch=None, mock_branches=None):
gitlab_mock = self.gitlab
if mock_branches is None:
mock_branches = gitlab_mock.branches
if branch is None: # Get default branch name
branch = self.gitlab.repo.default_branch
for each in mock_branches:
if each.name == branch:
branch_sha = each.commit['id']
return branch_sha
# Tests for _get_refs
@mock.patch('addons.gitlab.api.GitLabClient.branches')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_get_refs_defaults(self, mock_repo, mock_branches):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo
mock_branches.return_value = gitlab_mock.branches.return_value
branch, sha, branches = utils.get_refs(self.node_settings)
assert_equal(
branch,
gitlab_mock.repo.default_branch
)
assert_equal(sha, branches[0].commit['id']) # Get refs for default branch
assert_equal(
branches,
gitlab_mock.branches.return_value
)
@mock.patch('addons.gitlab.api.GitLabClient.branches')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_get_refs_branch(self, mock_repo, mock_branches):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
mock_branches.return_value = gitlab_mock.branches.return_value
branch, sha, branches = utils.get_refs(self.node_settings, 'master')
assert_equal(branch, 'master')
assert_equal(sha, branches[0].commit['id'])
assert_equal(
branches,
gitlab_mock.branches.return_value
)
def test_before_fork(self):
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(len(res.json['prompts']), 1)
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
def test_before_register(self, mock_has_auth):
mock_has_auth.return_value = True
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_true('GitLab' in res.json['prompts'][1])
def test_get_refs_sha_no_branch(self):
with assert_raises(HTTPError):
utils.get_refs(self.node_settings, sha='12345')
# Tests for _check_permissions
# make a user with no authorization; make sure check_permissions returns false
def test_permissions_no_auth(self):
gitlab_mock = self.gitlab
# project is set to private right now
connection = gitlab_mock
non_authenticated_user = UserFactory()
non_authenticated_auth = Auth(user=non_authenticated_user)
branch = 'master'
assert_false(check_permissions(self.node_settings, non_authenticated_auth, connection, branch))
# make a repository that doesn't allow push access for this user;
# make sure check_permissions returns false
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_no_access(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
branch = 'master'
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, branch, repo=mock_repository))
# make a branch with a different commit than the commit being passed into check_permissions
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_not_head(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
mock_branch = mock.Mock(**{
'commit': {'id': '67890'}
})
connection.branches.return_value = mock_branch
sha = '12345'
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, mock_branch, sha=sha))
# make sure permissions are not granted for editing a registration
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
def test_permissions(self, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
with mock.patch('osf.models.node.AbstractNode.is_registration', new_callable=mock.PropertyMock) as mock_is_reg:
mock_is_reg.return_value = True
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, 'master'))
def check_hook_urls(self, urls, node, path, sha):
url = node.web_url_for('addon_view_or_download_file', path=path, provider='gitlab')
expected_urls = {
'view': '{0}?branch={1}'.format(url, sha),
'download': '{0}?action=download&branch={1}'.format(url, sha)
}
assert_equal(urls['view'], expected_urls['view'])
assert_equal(urls['download'], expected_urls['download'])
@mock.patch('addons.gitlab.views.verify_hook_signature')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_hook_callback_add_file_not_thro_osf(self, mock_repo, mock_verify):
gitlab_mock = self.gitlab
gitlab_mock.repo = mock_repo
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{
'test': True,
'commits': [{
'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': '[email protected]'},
'committer': {'name': 'Testor', 'email': '[email protected]', 'username': 'tester'},
'added': ['PRJWN3TV'],
'removed': [],
'modified': [],
}]
},
content_type='application/json',
).maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_added')
urls = self.project.logs.latest().params['urls']
self.check_hook_urls(
urls,
self.project,
path='PRJWN3TV',
sha='b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
)
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_modify_file_not_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': ' foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': '[email protected]'},
'committer': {'name': 'Testor', 'email': '[email protected]',
'username': 'tester'},
'added': [], 'removed':[], 'modified':['PRJWN3TV']}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_updated')
urls = self.project.logs.latest().params['urls']
self.check_hook_urls(
urls,
self.project,
path='PRJWN3TV',
sha='b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
)
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_remove_file_not_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': '[email protected]'},
'committer': {'name': 'Testor', 'email': '[email protected]', 'username': 'tester'},
'added': [], 'removed': ['PRJWN3TV'], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_removed')
urls = self.project.logs.latest().params['urls']
assert_equal(urls, {})
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_add_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Added via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': '[email protected]'},
'committer': {'name': 'Testor', 'email': '[email protected]', 'username': 'tester'},
'added': ['PRJWN3TV'], 'removed':[], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_added')
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_modify_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Updated via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': '[email protected]'},
'committer': {'name': 'Testor', 'email': '[email protected]', 'username': 'tester'},
'added': [], 'removed':[], 'modified':['PRJWN3TV']}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_updated')
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_remove_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Deleted via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': '[email protected]'},
'committer': {'name': 'Testor', 'email': '[email protected]', 'username': 'tester'},
'added': [], 'removed':['PRJWN3TV'], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_removed')
class TestRegistrationsWithGitLab(OsfTestCase):
def setUp(self):
super(TestRegistrationsWithGitLab, self).setUp()
self.project = ProjectFactory.build()
self.project.save()
self.consolidated_auth = Auth(user=self.project.creator)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.user_settings = self.project.creator.get_addon('gitlab')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.save()
class TestGitLabSettings(OsfTestCase):
def setUp(self):
super(TestGitLabSettings, self).setUp()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.project = ProjectFactory()
self.auth = self.project.creator.auth
self.consolidated_auth = Auth(user=self.project.creator)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.user_settings = self.project.creator.get_addon('gitlab')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.repo_id = 'sheer-heart-attack'
self.node_settings.save()
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo(self, mock_repo, mock_add_hook):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
url = self.project.api_url + 'gitlab/settings/'
self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
'gitlab_repo_id': 'abc',
},
auth=self.auth
).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.node_settings.user, 'queen')
assert_equal(self.node_settings.repo, 'night at the opera')
assert_equal(self.project.logs.latest().action, 'gitlab_repo_linked')
mock_add_hook.assert_called_once_with(save=False)
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo_no_change(self, mock_repo, mock_add_hook):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
log_count = self.project.logs.count()
url = self.project.api_url + 'gitlab/settings/'
self.app.post_json(
url,
{
'gitlab_user': self.node_settings.user,
'gitlab_repo': self.node_settings.repo,
'gitlab_repo_id': self.node_settings.repo_id,
},
auth=self.auth
).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.project.logs.count(), log_count)
assert_false(mock_add_hook.called)
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo_non_existent(self, mock_repo):
mock_repo.return_value = None
url = self.project.api_url + 'gitlab/settings/'
res = self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
},
auth=self.auth,
expect_errors=True
).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('addons.gitlab.api.GitLabClient.branches')
def test_link_repo_registration(self, mock_branches):
mock_branches.return_value = [
Branch.from_json(dumps({
'name': 'master',
'commit': {
'sha': '6dcb09b5b57875f334f61aebed695e2e4193db5e',
'url': 'https://api.gitlab.com/repos/octocat/Hello-World/commits/c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc',
}
})),
Branch.from_json(dumps({
'name': 'develop',
'commit': {
'sha': '6dcb09b5b57875asdasedawedawedwedaewdwdass',
'url': 'https://api.gitlab.com/repos/octocat/Hello-World/commits/cdcb09b5b57875asdasedawedawedwedaewdwdass',
}
}))
]
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=self.consolidated_auth,
data=''
)
url = registration.api_url + 'gitlab/settings/'
res = self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
},
auth=self.auth,
expect_errors=True
).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('addons.gitlab.models.NodeSettings.delete_hook')
def test_deauthorize(self, mock_delete_hook):
url = self.project.api_url + 'gitlab/user_auth/'
self.app.delete(url, auth=self.auth).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.node_settings.user, None)
assert_equal(self.node_settings.repo, None)
assert_equal(self.node_settings.user_settings, None)
assert_equal(self.project.logs.latest().action, 'gitlab_node_deauthorized')
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/cloud/openstack/os_server_action.py
|
54
|
8029
|
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2015, Jesse Keating <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_action
short_description: Perform actions on Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Jesse Keating (@omgjlk)"
description:
- Perform server actions on an existing compute instance from OpenStack.
This module does not return any data other than changed true/false.
When I(action) is 'rebuild', then I(image) parameter is required.
options:
server:
description:
- Name or ID of the instance
required: true
wait:
description:
- If the module should wait for the instance action to be performed.
type: bool
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to perform
the requested action.
default: 180
action:
description:
- Perform the given action. The lock and unlock actions always return
changed as the servers API does not provide lock status.
choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,
rebuild]
    required: true
image:
description:
- Image the server should be rebuilt with
version_added: "2.3"
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Pauses a compute instance
- os_server_action:
action: pause
auth:
auth_url: https://identity.example.com
username: admin
password: admin
project_name: admin
server: vm1
timeout: 200
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
_action_map = {'stop': 'SHUTOFF',
'start': 'ACTIVE',
'pause': 'PAUSED',
'unpause': 'ACTIVE',
'lock': 'ACTIVE', # API doesn't show lock/unlock status
'unlock': 'ACTIVE',
'suspend': 'SUSPENDED',
'resume': 'ACTIVE',
'rebuild': 'ACTIVE'}
_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']
def _action_url(server_id):
return '/servers/{server_id}/action'.format(server_id=server_id)
def _wait(timeout, cloud, server, action, module, sdk):
"""Wait for the server to reach the desired state for the given action."""
for count in sdk.utils.iterate_timeout(
timeout,
"Timeout waiting for server to complete %s" % action):
try:
server = cloud.get_server(server.id)
except Exception:
continue
if server.status == _action_map[action]:
return
if server.status == 'ERROR':
module.fail_json(msg="Server reached ERROR state while attempting to %s" % action)
def _system_state_change(action, status):
"""Check if system state would change."""
if status == _action_map[action]:
return False
return True
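# Editorial note, not part of the original module: stopping a server that is
# already SHUTOFF is a no-op, so check mode reports no change, while stopping
# an ACTIVE server reports a change, e.g.
#   _system_state_change('stop', 'SHUTOFF')  # -> False (already stopped)
#   _system_state_change('stop', 'ACTIVE')   # -> True  (would change state)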
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause',
'lock', 'unlock', 'suspend', 'resume',
'rebuild']),
image=dict(required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, supports_check_mode=True,
required_if=[('action', 'rebuild', ['image'])],
**module_kwargs)
action = module.params['action']
wait = module.params['wait']
timeout = module.params['timeout']
image = module.params['image']
sdk, cloud = openstack_cloud_from_module(module)
try:
server = cloud.get_server(module.params['server'])
if not server:
            module.fail_json(msg='Could not find server %s' % module.params['server'])
status = server.status
if module.check_mode:
module.exit_json(changed=_system_state_change(action, status))
if action == 'stop':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.compute.post(
_action_url(server.id),
json={'os-stop': None})
if wait:
_wait(timeout, cloud, server, action, module, sdk)
module.exit_json(changed=True)
if action == 'start':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.compute.post(
_action_url(server.id),
json={'os-start': None})
if wait:
_wait(timeout, cloud, server, action, module, sdk)
module.exit_json(changed=True)
if action == 'pause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.compute.post(
_action_url(server.id),
json={'pause': None})
if wait:
_wait(timeout, cloud, server, action, module, sdk)
module.exit_json(changed=True)
elif action == 'unpause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.compute.post(
_action_url(server.id),
json={'unpause': None})
if wait:
_wait(timeout, cloud, server, action, module, sdk)
module.exit_json(changed=True)
elif action == 'lock':
# lock doesn't set a state, just do it
cloud.compute.post(
_action_url(server.id),
json={'lock': None})
module.exit_json(changed=True)
elif action == 'unlock':
# unlock doesn't set a state, just do it
cloud.compute.post(
_action_url(server.id),
json={'unlock': None})
module.exit_json(changed=True)
elif action == 'suspend':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.compute.post(
_action_url(server.id),
json={'suspend': None})
if wait:
_wait(timeout, cloud, server, action, module, sdk)
module.exit_json(changed=True)
elif action == 'resume':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.compute.post(
_action_url(server.id),
json={'resume': None})
if wait:
_wait(timeout, cloud, server, action, module, sdk)
module.exit_json(changed=True)
elif action == 'rebuild':
image = cloud.get_image(image)
if image is None:
module.fail_json(msg="Image does not exist")
            # rebuild requires the target image reference in the request body
            cloud.compute.post(
                _action_url(server.id),
                json={'rebuild': {'imageRef': image.id}})
if wait:
_wait(timeout, cloud, server, action, module, sdk)
module.exit_json(changed=True)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == '__main__':
main()
|
gpl-3.0
|
listamilton/supermilton.repository
|
script.module.youtube.dl/lib/youtube_dl/extractor/giga.py
|
142
|
3819
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..utils import (
qualities,
compat_str,
parse_duration,
parse_iso8601,
str_to_int,
)
class GigaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?giga\.de/(?:[^/]+/)*(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.giga.de/filme/anime-awesome/trailer/anime-awesome-chihiros-reise-ins-zauberland-das-beste-kommt-zum-schluss/',
'md5': '6bc5535e945e724640664632055a584f',
'info_dict': {
'id': '2622086',
'display_id': 'anime-awesome-chihiros-reise-ins-zauberland-das-beste-kommt-zum-schluss',
'ext': 'mp4',
'title': 'Anime Awesome: Chihiros Reise ins Zauberland – Das Beste kommt zum Schluss',
'description': 'md5:afdf5862241aded4718a30dff6a57baf',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 578,
'timestamp': 1414749706,
'upload_date': '20141031',
'uploader': 'Robin Schweiger',
'view_count': int,
},
}, {
'url': 'http://www.giga.de/games/channel/giga-top-montag/giga-topmontag-die-besten-serien-2014/',
'only_matching': True,
}, {
'url': 'http://www.giga.de/extra/netzkultur/videos/giga-games-tom-mats-robin-werden-eigene-wege-gehen-eine-ankuendigung/',
'only_matching': True,
}, {
'url': 'http://www.giga.de/tv/jonas-liest-spieletitel-eingedeutscht-episode-2/',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
[r'data-video-id="(\d+)"', r'/api/video/jwplayer/#v=(\d+)'],
webpage, 'video id')
playlist = self._download_json(
'http://www.giga.de/api/syndication/video/video_id/%s/playlist.json?content=syndication/key/368b5f151da4ae05ced7fa296bdff65a/'
% video_id, video_id)[0]
quality = qualities(['normal', 'hd720'])
formats = []
for format_id in itertools.count(0):
fmt = playlist.get(compat_str(format_id))
if not fmt:
break
formats.append({
'url': fmt['src'],
'format_id': '%s-%s' % (fmt['quality'], fmt['type'].split('/')[-1]),
'quality': quality(fmt['quality']),
})
self._sort_formats(formats)
title = self._html_search_meta(
'title', webpage, 'title', fatal=True)
description = self._html_search_meta(
'description', webpage, 'description')
thumbnail = self._og_search_thumbnail(webpage)
duration = parse_duration(self._search_regex(
r'(?s)(?:data-video-id="{0}"|data-video="[^"]*/api/video/jwplayer/#v={0}[^"]*")[^>]*>.+?<span class="duration">([^<]+)</span>'.format(video_id),
webpage, 'duration', fatal=False))
timestamp = parse_iso8601(self._search_regex(
r'datetime="([^"]+)"', webpage, 'upload date', fatal=False))
uploader = self._search_regex(
r'class="author">([^<]+)</a>', webpage, 'uploader', fatal=False)
view_count = str_to_int(self._search_regex(
r'<span class="views"><strong>([\d.,]+)</strong>',
webpage, 'view count', fatal=False))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'view_count': view_count,
'formats': formats,
}
|
gpl-2.0
|
tima/ansible
|
lib/ansible/module_utils/facts/system/selinux.py
|
162
|
3207
|
# Collect facts related to selinux
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
SELINUX_MODE_DICT = {
1: 'enforcing',
0: 'permissive',
-1: 'disabled'
}
class SelinuxFactCollector(BaseFactCollector):
name = 'selinux'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
facts_dict = {}
selinux_facts = {}
# If selinux library is missing, only set the status and selinux_python_present since
# there is no way to tell if SELinux is enabled or disabled on the system
# without the library.
if not HAVE_SELINUX:
selinux_facts['status'] = 'Missing selinux Python library'
facts_dict['selinux'] = selinux_facts
facts_dict['selinux_python_present'] = False
return facts_dict
# Set a boolean for testing whether the Python library is present
facts_dict['selinux_python_present'] = True
if not selinux.is_selinux_enabled():
selinux_facts['status'] = 'disabled'
else:
selinux_facts['status'] = 'enabled'
try:
selinux_facts['policyvers'] = selinux.security_policyvers()
except (AttributeError, OSError):
selinux_facts['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
selinux_facts['config_mode'] = 'unknown'
except (AttributeError, OSError):
selinux_facts['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
selinux_facts['mode'] = SELINUX_MODE_DICT.get(mode, 'unknown')
except (AttributeError, OSError):
selinux_facts['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
selinux_facts['type'] = policytype
else:
selinux_facts['type'] = 'unknown'
except (AttributeError, OSError):
selinux_facts['type'] = 'unknown'
facts_dict['selinux'] = selinux_facts
return facts_dict
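# Editorial sketch, not part of the original collector: the shape of the
# dictionary returned by collect() on a host with SELinux enforcing; the
# concrete values below are illustrative.
#   SelinuxFactCollector().collect()
#   # {'selinux_python_present': True,
#   #  'selinux': {'status': 'enabled',
#   #              'policyvers': 33,
#   #              'config_mode': 'enforcing',
#   #              'mode': 'enforcing',
#   #              'type': 'targeted'}}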
|
gpl-3.0
|
husman/WoTrack
|
apps/wordtrack/levenshtein_reduce.py
|
1
|
1308
|
from nltk import metrics
class LevenshteinReduce(object):
def __init__(self, phrase, tracks):
"""
:param phrase: (str) phrase or ngram
        :param tracks: (list) tracks to perform best string matching with
        Use get_most_similar_track() to retrieve the track from `tracks`
        that best matches the given phrase.
"""
self.phrases = phrase
self.tracks = tracks
def get_most_similar_track(self):
"""
        Determines the Levenshtein distance between each track and the phrase.
        :return: (dict) the track entry with the smallest Levenshtein distance
            to the phrase, or None if no tracks were given
"""
if self.tracks is None:
return
levenshteins = [
{
'levenshtein': metrics.edit_distance(self.phrases, track['name']),
'url': track['url'],
'name': track['name'],
'artist': track['artist'],
'image': track['image'],
'phrase': self.phrases,
}
for track in self.tracks
]
minimum_distance = None
if levenshteins:
minimum_distance = reduce(
lambda d1, d2: d1 if d1['levenshtein'] < d2['levenshtein'] else d2,
levenshteins
)
return minimum_distance
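# Editorial usage sketch, not part of the original module; the track data
# below is illustrative.
if __name__ == '__main__':
    sample_tracks = [
        {'name': 'Word Up', 'url': 'http://example.com/1',
         'artist': 'Cameo', 'image': ''},
        {'name': 'Wordy Rappinghood', 'url': 'http://example.com/2',
         'artist': 'Tom Tom Club', 'image': ''},
    ]
    best = LevenshteinReduce('word up', sample_tracks).get_most_similar_track()
    # `best` is the entry with the smallest edit distance to the phrase
    # (here the 'Word Up' track), or None if no tracks were given.
    print(best)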
|
mit
|
samuelclay/NewsBlur
|
apps/monitor/views/newsblur_app_times.py
|
1
|
1188
|
from django.views import View
from django.shortcuts import render
import datetime
from django.conf import settings
class AppTimes(View):
def get(self, request):
servers = dict((("%s" % s['_id'], s['page_load']) for s in self.stats))
data = servers
chart_name = "app_times"
chart_type = "counter"
formatted_data = {}
for k, v in data.items():
formatted_data[k] = f'{chart_name}{{app_server="{k}"}} {v}'
context = {
"data": formatted_data,
"chart_name": chart_name,
"chart_type": chart_type,
}
return render(request, 'monitor/prometheus_data.html', context, content_type="text/plain")
@property
def stats(self):
stats = settings.MONGOANALYTICSDB.nbanalytics.page_loads.aggregate([{
"$match": {
"date": {
"$gt": datetime.datetime.now() - datetime.timedelta(minutes=5),
},
},
}, {
"$group": {
"_id" : "$server",
"page_load" : {"$avg": "$page_load"},
},
}])
return list(stats)
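# Editorial note, not part of the original view: each entry rendered by the
# template above becomes one Prometheus-style sample line built from the
# f-string, e.g. (server name and value illustrative):
#   app_times{app_server="app01"} 243.7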
|
mit
|
oritwas/bonzini
|
scripts/tracetool/backend/stderr.py
|
114
|
1385
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stderr built-in backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
def c(events):
out('#include "trace.h"',
'',
'TraceEvent trace_list[] = {')
for e in events:
out('{.tp_name = "%(name)s", .state=0},',
name = e.name,
)
out('};')
def h(events):
out('#include <stdio.h>',
'#include "trace/stderr.h"',
'',
'extern TraceEvent trace_list[];')
for num, e in enumerate(events):
argnames = ", ".join(e.args.names())
if len(e.args) > 0:
argnames = ", " + argnames
out('static inline void trace_%(name)s(%(args)s)',
'{',
' if (trace_list[%(event_num)s].state != 0) {',
' fprintf(stderr, "%(name)s " %(fmt)s "\\n" %(argnames)s);',
' }',
'}',
name = e.name,
args = e.args,
event_num = num,
fmt = e.fmt,
argnames = argnames,
)
out('',
'#define NR_TRACE_EVENTS %d' % len(events))
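# Editorial sketch, not part of the original backend: for a hypothetical event
# named "foo" with args "int a, int b" and fmt '"a=%d b=%d"', h() emits an
# inline helper roughly like this:
#   static inline void trace_foo(int a, int b)
#   {
#       if (trace_list[0].state != 0) {
#           fprintf(stderr, "foo " "a=%d b=%d" "\n" , a, b);
#       }
#   }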
|
gpl-2.0
|
MrLoick/python-for-android
|
python-modules/twisted/twisted/trial/unittest.py
|
59
|
57773
|
# -*- test-case-name: twisted.trial.test.test_tests -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Things likely to be used by writers of unit tests.
Maintainer: Jonathan Lange
"""
import doctest, inspect
import os, warnings, sys, tempfile, gc, types
from pprint import pformat
try:
from dis import findlinestarts as _findlinestarts
except ImportError:
# Definition copied from Python's Lib/dis.py - findlinestarts was not
# available in Python 2.3. This function is copyright Python Software
# Foundation, released under the Python license:
# http://www.python.org/psf/license/
def _findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno)
from twisted.internet import defer, utils
from twisted.python import components, failure, log, monkey
from twisted.python.deprecate import getDeprecationWarningString
from twisted.trial import itrial, reporter, util
pyunit = __import__('unittest')
from zope.interface import implements
class SkipTest(Exception):
"""
Raise this (with a reason) to skip the current test. You may also set
method.skip to a reason string to skip it, or set class.skip to skip the
entire TestCase.
"""
class FailTest(AssertionError):
"""Raised to indicate the current test has failed to pass."""
class Todo(object):
"""
Internal object used to mark a L{TestCase} as 'todo'. Tests marked 'todo'
are reported differently in Trial L{TestResult}s. If todo'd tests fail,
they do not fail the suite and the errors are reported in a separate
category. If todo'd tests succeed, Trial L{TestResult}s will report an
unexpected success.
"""
def __init__(self, reason, errors=None):
"""
@param reason: A string explaining why the test is marked 'todo'
@param errors: An iterable of exception types that the test is
expected to raise. If one of these errors is raised by the test, it
will be trapped. Raising any other kind of error will fail the test.
If C{None} is passed, then all errors will be trapped.
"""
self.reason = reason
self.errors = errors
def __repr__(self):
return "<Todo reason=%r errors=%r>" % (self.reason, self.errors)
def expected(self, failure):
"""
@param failure: A L{twisted.python.failure.Failure}.
@return: C{True} if C{failure} is expected, C{False} otherwise.
"""
if self.errors is None:
return True
for error in self.errors:
if failure.check(error):
return True
return False
def makeTodo(value):
"""
Return a L{Todo} object built from C{value}.
If C{value} is a string, return a Todo that expects any exception with
C{value} as a reason. If C{value} is a tuple, the second element is used
as the reason and the first element as the excepted error(s).
@param value: A string or a tuple of C{(errors, reason)}, where C{errors}
is either a single exception class or an iterable of exception classes.
@return: A L{Todo} object.
"""
if isinstance(value, str):
return Todo(reason=value)
if isinstance(value, tuple):
errors, reason = value
try:
errors = list(errors)
except TypeError:
errors = [errors]
return Todo(reason=reason, errors=errors)
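# Editorial example, not part of the original module: makeTodo accepts the
# same values a test author may assign to a `todo` attribute, e.g.
#   makeTodo("not implemented yet")
#   # -> Todo trapping any error, with reason "not implemented yet"
#   makeTodo((IOError, "broken on win32"))
#   # -> Todo trapping only IOError, with reason "broken on win32"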
class _Warning(object):
"""
A L{_Warning} instance represents one warning emitted through the Python
warning system (L{warnings}). This is used to insulate callers of
L{_collectWarnings} from changes to the Python warnings system which might
otherwise require changes to the warning objects that function passes to
the observer object it accepts.
@ivar message: The string which was passed as the message parameter to
L{warnings.warn}.
@ivar category: The L{Warning} subclass which was passed as the category
parameter to L{warnings.warn}.
@ivar filename: The name of the file containing the definition of the code
object which was C{stacklevel} frames above the call to
L{warnings.warn}, where C{stacklevel} is the value of the C{stacklevel}
parameter passed to L{warnings.warn}.
@ivar lineno: The source line associated with the active instruction of the
        code object which was C{stacklevel} frames above the call to
L{warnings.warn}, where C{stacklevel} is the value of the C{stacklevel}
parameter passed to L{warnings.warn}.
"""
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
"""
Call C{f} with C{args} positional arguments and C{kwargs} keyword arguments
and collect all warnings which are emitted as a result in a list.
@param observeWarning: A callable which will be invoked with a L{_Warning}
instance each time a warning is emitted.
@return: The return value of C{f(*args, **kwargs)}.
"""
def showWarning(message, category, filename, lineno, file=None, line=None):
assert isinstance(message, Warning)
observeWarning(_Warning(
message.args[0], category, filename, lineno))
# Disable the per-module cache for every module otherwise if the warning
# which the caller is expecting us to collect was already emitted it won't
# be re-emitted by the call to f which happens below.
for v in sys.modules.itervalues():
if v is not None:
try:
v.__warningregistry__ = None
except:
# Don't specify a particular exception type to handle in case
# some wacky object raises some wacky exception in response to
# the setattr attempt.
pass
origFilters = warnings.filters[:]
origShow = warnings.showwarning
warnings.simplefilter('always')
try:
warnings.showwarning = showWarning
result = f(*args, **kwargs)
finally:
warnings.filters[:] = origFilters
warnings.showwarning = origShow
return result
class _Assertions(pyunit.TestCase, object):
"""
Replaces many of the built-in TestCase assertions. In general, these
assertions provide better error messages and are easier to use in
callbacks. Also provides new assertions such as L{failUnlessFailure}.
Although the tests are defined as 'failIf*' and 'failUnless*', they can
also be called as 'assertNot*' and 'assert*'.
"""
def fail(self, msg=None):
"""
Absolutely fail the test. Do not pass go, do not collect $200.
@param msg: the message that will be displayed as the reason for the
failure
"""
raise self.failureException(msg)
def failIf(self, condition, msg=None):
"""
Fail the test if C{condition} evaluates to True.
@param condition: any object that defines __nonzero__
"""
if condition:
raise self.failureException(msg)
return condition
assertNot = assertFalse = failUnlessFalse = failIf
def failUnless(self, condition, msg=None):
"""
Fail the test if C{condition} evaluates to False.
@param condition: any object that defines __nonzero__
"""
if not condition:
raise self.failureException(msg)
return condition
assert_ = assertTrue = failUnlessTrue = failUnless
def failUnlessRaises(self, exception, f, *args, **kwargs):
"""
Fail the test unless calling the function C{f} with the given
C{args} and C{kwargs} raises C{exception}. The failure will report
the traceback and call stack of the unexpected exception.
@param exception: exception type that is to be expected
@param f: the function to call
@return: The raised exception instance, if it is of the given type.
@raise self.failureException: Raised if the function call does
not raise an exception or if it raises an exception of a
different type.
"""
try:
result = f(*args, **kwargs)
except exception, inst:
return inst
except:
raise self.failureException('%s raised instead of %s:\n %s'
% (sys.exc_info()[0],
exception.__name__,
failure.Failure().getTraceback()))
else:
raise self.failureException('%s not raised (%r returned)'
% (exception.__name__, result))
assertRaises = failUnlessRaises
def failUnlessEqual(self, first, second, msg=''):
"""
Fail the test if C{first} and C{second} are not equal.
@param msg: A string describing the failure that's included in the
exception.
"""
if not first == second:
if msg is None:
msg = ''
if len(msg) > 0:
msg += '\n'
raise self.failureException(
'%snot equal:\na = %s\nb = %s\n'
% (msg, pformat(first), pformat(second)))
return first
assertEqual = assertEquals = failUnlessEquals = failUnlessEqual
def failUnlessIdentical(self, first, second, msg=None):
"""
Fail the test if C{first} is not C{second}. This is an
        object-identity-equality test, not an object equality
(i.e. C{__eq__}) test.
@param msg: if msg is None, then the failure message will be
'%r is not %r' % (first, second)
"""
if first is not second:
raise self.failureException(msg or '%r is not %r' % (first, second))
return first
assertIdentical = failUnlessIdentical
def failIfIdentical(self, first, second, msg=None):
"""
Fail the test if C{first} is C{second}. This is an
        object-identity-equality test, not an object equality
(i.e. C{__eq__}) test.
@param msg: if msg is None, then the failure message will be
'%r is %r' % (first, second)
"""
if first is second:
raise self.failureException(msg or '%r is %r' % (first, second))
return first
assertNotIdentical = failIfIdentical
def failIfEqual(self, first, second, msg=None):
"""
Fail the test if C{first} == C{second}.
@param msg: if msg is None, then the failure message will be
'%r == %r' % (first, second)
"""
if not first != second:
raise self.failureException(msg or '%r == %r' % (first, second))
return first
assertNotEqual = assertNotEquals = failIfEquals = failIfEqual
def failUnlessIn(self, containee, container, msg=None):
"""
Fail the test if C{containee} is not found in C{container}.
@param containee: the value that should be in C{container}
@param container: a sequence type, or in the case of a mapping type,
will follow semantics of 'if key in dict.keys()'
@param msg: if msg is None, then the failure message will be
'%r not in %r' % (first, second)
"""
if containee not in container:
raise self.failureException(msg or "%r not in %r"
% (containee, container))
return containee
assertIn = failUnlessIn
def failIfIn(self, containee, container, msg=None):
"""
Fail the test if C{containee} is found in C{container}.
@param containee: the value that should not be in C{container}
@param container: a sequence type, or in the case of a mapping type,
will follow semantics of 'if key in dict.keys()'
@param msg: if msg is None, then the failure message will be
'%r in %r' % (first, second)
"""
if containee in container:
raise self.failureException(msg or "%r in %r"
% (containee, container))
return containee
assertNotIn = failIfIn
def failIfAlmostEqual(self, first, second, places=7, msg=None):
"""
Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero.
@note: decimal places (from zero) is usually not the same
as significant digits (measured from the most
            significant digit).
        @note: included for compatibility with PyUnit test cases
"""
if round(second-first, places) == 0:
raise self.failureException(msg or '%r == %r within %r places'
% (first, second, places))
return first
assertNotAlmostEqual = assertNotAlmostEquals = failIfAlmostEqual
failIfAlmostEquals = failIfAlmostEqual
def failUnlessAlmostEqual(self, first, second, places=7, msg=None):
"""
Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero.
@note: decimal places (from zero) is usually not the same
as significant digits (measured from the most
            significant digit).
        @note: included for compatibility with PyUnit test cases
"""
if round(second-first, places) != 0:
raise self.failureException(msg or '%r != %r within %r places'
% (first, second, places))
return first
assertAlmostEqual = assertAlmostEquals = failUnlessAlmostEqual
failUnlessAlmostEquals = failUnlessAlmostEqual
def failUnlessApproximates(self, first, second, tolerance, msg=None):
"""
        Fail if C{abs(first - second)} > C{tolerance}
@param msg: if msg is None, then the failure message will be
'%r ~== %r' % (first, second)
"""
if abs(first - second) > tolerance:
raise self.failureException(msg or "%s ~== %s" % (first, second))
return first
assertApproximates = failUnlessApproximates
def failUnlessFailure(self, deferred, *expectedFailures):
"""
Fail if C{deferred} does not errback with one of C{expectedFailures}.
Returns the original Deferred with callbacks added. You will need
to return this Deferred from your test case.
"""
def _cb(ignore):
raise self.failureException(
"did not catch an error, instead got %r" % (ignore,))
def _eb(failure):
if failure.check(*expectedFailures):
return failure.value
else:
output = ('\nExpected: %r\nGot:\n%s'
% (expectedFailures, str(failure)))
raise self.failureException(output)
return deferred.addCallbacks(_cb, _eb)
assertFailure = failUnlessFailure
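    # Usage sketch (hypothetical test method and errback-producing function,
    # not part of this module): assertFailure returns the Deferred, so the
    # test must return it for trial to wait on the assertion.
    #
    #     def test_raisesValueError(self):
    #         d = defer.maybeDeferred(functionExpectedToFail)
    #         return self.assertFailure(d, ValueError)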
def failUnlessSubstring(self, substring, astring, msg=None):
"""
Fail if C{substring} does not exist within C{astring}.
"""
return self.failUnlessIn(substring, astring, msg)
assertSubstring = failUnlessSubstring
def failIfSubstring(self, substring, astring, msg=None):
"""
Fail if C{astring} contains C{substring}.
"""
return self.failIfIn(substring, astring, msg)
assertNotSubstring = failIfSubstring
def failUnlessWarns(self, category, message, filename, f,
*args, **kwargs):
"""
Fail if the given function doesn't generate the specified warning when
called. It calls the function, checks the warning, and forwards the
result of the function if everything is fine.
@param category: the category of the warning to check.
@param message: the output message of the warning to check.
@param filename: the filename where the warning should come from.
@param f: the function which is supposed to generate the warning.
@type f: any callable.
@param args: the arguments to C{f}.
@param kwargs: the keywords arguments to C{f}.
@return: the result of the original function C{f}.
"""
warningsShown = []
result = _collectWarnings(warningsShown.append, f, *args, **kwargs)
if not warningsShown:
self.fail("No warnings emitted")
first = warningsShown[0]
for other in warningsShown[1:]:
if ((other.message, other.category)
!= (first.message, first.category)):
self.fail("Can't handle different warnings")
self.assertEqual(first.message, message)
self.assertIdentical(first.category, category)
# Use starts with because of .pyc/.pyo issues.
self.failUnless(
filename.startswith(first.filename),
'Warning in %r, expected %r' % (first.filename, filename))
# It would be nice to be able to check the line number as well, but
# different configurations actually end up reporting different line
# numbers (generally the variation is only 1 line, but that's enough
# to fail the test erroneously...).
# self.assertEqual(lineno, xxx)
return result
assertWarns = failUnlessWarns
def failUnlessIsInstance(self, instance, classOrTuple):
"""
Fail if C{instance} is not an instance of the given class or of
one of the given classes.
@param instance: the object to test the type (first argument of the
C{isinstance} call).
@type instance: any.
@param classOrTuple: the class or classes to test against (second
argument of the C{isinstance} call).
@type classOrTuple: class, type, or tuple.
"""
if not isinstance(instance, classOrTuple):
self.fail("%r is not an instance of %s" % (instance, classOrTuple))
assertIsInstance = failUnlessIsInstance
def failIfIsInstance(self, instance, classOrTuple):
"""
        Fail if C{instance} is an instance of the given class or of
        one of the given classes.
@param instance: the object to test the type (first argument of the
C{isinstance} call).
@type instance: any.
@param classOrTuple: the class or classes to test against (second
argument of the C{isinstance} call).
@type classOrTuple: class, type, or tuple.
"""
if isinstance(instance, classOrTuple):
self.fail("%r is an instance of %s" % (instance, classOrTuple))
assertNotIsInstance = failIfIsInstance
class _LogObserver(object):
"""
Observes the Twisted logs and catches any errors.
@ivar _errors: A C{list} of L{Failure} instances which were received as
error events from the Twisted logging system.
@ivar _added: A C{int} giving the number of times C{_add} has been called
less the number of times C{_remove} has been called; used to only add
        this observer to the Twisted logging system once, regardless of the
number of calls to the add method.
@ivar _ignored: A C{list} of exception types which will not be recorded.
"""
def __init__(self):
self._errors = []
self._added = 0
self._ignored = []
def _add(self):
if self._added == 0:
log.addObserver(self.gotEvent)
self._oldFE, log._flushErrors = (log._flushErrors, self.flushErrors)
self._oldIE, log._ignore = (log._ignore, self._ignoreErrors)
self._oldCI, log._clearIgnores = (log._clearIgnores,
self._clearIgnores)
self._added += 1
def _remove(self):
self._added -= 1
if self._added == 0:
log.removeObserver(self.gotEvent)
log._flushErrors = self._oldFE
log._ignore = self._oldIE
log._clearIgnores = self._oldCI
def _ignoreErrors(self, *errorTypes):
"""
Do not store any errors with any of the given types.
"""
self._ignored.extend(errorTypes)
def _clearIgnores(self):
"""
Stop ignoring any errors we might currently be ignoring.
"""
self._ignored = []
def flushErrors(self, *errorTypes):
"""
Flush errors from the list of caught errors. If no arguments are
specified, remove all errors. If arguments are specified, only remove
errors of those types from the stored list.
"""
if errorTypes:
flushed = []
remainder = []
for f in self._errors:
if f.check(*errorTypes):
flushed.append(f)
else:
remainder.append(f)
self._errors = remainder
else:
flushed = self._errors
self._errors = []
return flushed
def getErrors(self):
"""
Return a list of errors caught by this observer.
"""
return self._errors
def gotEvent(self, event):
"""
The actual observer method. Called whenever a message is logged.
@param event: A dictionary containing the log message. Actual
structure undocumented (see source for L{twisted.python.log}).
"""
if event.get('isError', False) and 'failure' in event:
f = event['failure']
if len(self._ignored) == 0 or not f.check(*self._ignored):
self._errors.append(f)
_logObserver = _LogObserver()
_wait_is_running = []
class TestCase(_Assertions):
"""
A unit test. The atom of the unit testing universe.
This class extends C{unittest.TestCase} from the standard library. The
main feature is the ability to return C{Deferred}s from tests and fixture
methods and to have the suite wait for those C{Deferred}s to fire.
To write a unit test, subclass C{TestCase} and define a method (say,
'test_foo') on the subclass. To run the test, instantiate your subclass
with the name of the method, and call L{run} on the instance, passing a
L{TestResult} object.
The C{trial} script will automatically find any C{TestCase} subclasses
defined in modules beginning with 'test_' and construct test cases for all
methods beginning with 'test'.
If an error is logged during the test run, the test will fail with an
error. See L{log.err}.
@ivar failureException: An exception class, defaulting to C{FailTest}. If
the test method raises this exception, it will be reported as a failure,
rather than an exception. All of the assertion methods raise this if the
assertion fails.
@ivar skip: C{None} or a string explaining why this test is to be
skipped. If defined, the test will not be run. Instead, it will be
reported to the result object as 'skipped' (if the C{TestResult} supports
skipping).
@ivar suppress: C{None} or a list of tuples of C{(args, kwargs)} to be
passed to C{warnings.filterwarnings}. Use these to suppress warnings
raised in a test. Useful for testing deprecated code. See also
L{util.suppress}.
@ivar timeout: A real number of seconds. If set, the test will
raise an error if it takes longer than C{timeout} seconds.
If not set, util.DEFAULT_TIMEOUT_DURATION is used.
@ivar todo: C{None}, a string or a tuple of C{(errors, reason)} where
C{errors} is either an exception class or an iterable of exception
classes, and C{reason} is a string. See L{Todo} or L{makeTodo} for more
information.
"""
implements(itrial.ITestCase)
failureException = FailTest
def __init__(self, methodName='runTest'):
"""
Construct an asynchronous test case for C{methodName}.
@param methodName: The name of a method on C{self}. This method should
be a unit test. That is, it should be a short method that calls some of
the assert* methods. If C{methodName} is unspecified, L{runTest} will
be used as the test method. This is mostly useful for testing Trial.
"""
super(TestCase, self).__init__(methodName)
self._testMethodName = methodName
testMethod = getattr(self, methodName)
self._parents = [testMethod, self]
self._parents.extend(util.getPythonContainers(testMethod))
self._passed = False
self._cleanups = []
if sys.version_info >= (2, 6):
# Override the comparison defined by the base TestCase which considers
# instances of the same class with the same _testMethodName to be
# equal. Since trial puts TestCase instances into a set, that
# definition of comparison makes it impossible to run the same test
# method twice. Most likely, trial should stop using a set to hold
# tests, but until it does, this is necessary on Python 2.6. Only
# __eq__ and __ne__ are required here, not __hash__, since the
# inherited __hash__ is compatible with these equality semantics. A
# different __hash__ might be slightly more efficient (by reducing
# collisions), but who cares? -exarkun
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def _run(self, methodName, result):
from twisted.internet import reactor
timeout = self.getTimeout()
def onTimeout(d):
e = defer.TimeoutError("%r (%s) still running at %s secs"
% (self, methodName, timeout))
f = failure.Failure(e)
# try to errback the deferred that the test returns (for no gorram
# reason) (see issue1005 and test_errorPropagation in
# test_deferred)
try:
d.errback(f)
except defer.AlreadyCalledError:
# if the deferred has been called already but the *back chain
# is still unfinished, crash the reactor and report timeout
# error ourself.
reactor.crash()
self._timedOut = True # see self._wait
todo = self.getTodo()
if todo is not None and todo.expected(f):
result.addExpectedFailure(self, f, todo)
else:
result.addError(self, f)
onTimeout = utils.suppressWarnings(
onTimeout, util.suppress(category=DeprecationWarning))
method = getattr(self, methodName)
d = defer.maybeDeferred(utils.runWithWarningsSuppressed,
self.getSuppress(), method)
call = reactor.callLater(timeout, onTimeout, d)
d.addBoth(lambda x : call.active() and call.cancel() or x)
return d
def shortDescription(self):
desc = super(TestCase, self).shortDescription()
if desc is None:
return self._testMethodName
return desc
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def deferSetUp(self, ignored, result):
d = self._run('setUp', result)
d.addCallbacks(self.deferTestMethod, self._ebDeferSetUp,
callbackArgs=(result,),
errbackArgs=(result,))
return d
def _ebDeferSetUp(self, failure, result):
if failure.check(SkipTest):
result.addSkip(self, self._getReason(failure))
else:
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
return self.deferRunCleanups(None, result)
def deferTestMethod(self, ignored, result):
d = self._run(self._testMethodName, result)
d.addCallbacks(self._cbDeferTestMethod, self._ebDeferTestMethod,
callbackArgs=(result,),
errbackArgs=(result,))
d.addBoth(self.deferRunCleanups, result)
d.addBoth(self.deferTearDown, result)
return d
def _cbDeferTestMethod(self, ignored, result):
if self.getTodo() is not None:
result.addUnexpectedSuccess(self, self.getTodo())
else:
self._passed = True
return ignored
def _ebDeferTestMethod(self, f, result):
todo = self.getTodo()
if todo is not None and todo.expected(f):
result.addExpectedFailure(self, f, todo)
elif f.check(self.failureException, FailTest):
result.addFailure(self, f)
elif f.check(KeyboardInterrupt):
result.addError(self, f)
result.stop()
elif f.check(SkipTest):
result.addSkip(self, self._getReason(f))
else:
result.addError(self, f)
def deferTearDown(self, ignored, result):
d = self._run('tearDown', result)
d.addErrback(self._ebDeferTearDown, result)
return d
def _ebDeferTearDown(self, failure, result):
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
self._passed = False
def deferRunCleanups(self, ignored, result):
"""
        Run any scheduled cleanups and report errors (if any) to the result
        object.
"""
d = self._runCleanups()
d.addCallback(self._cbDeferRunCleanups, result)
return d
def _cbDeferRunCleanups(self, cleanupResults, result):
for flag, failure in cleanupResults:
if flag == defer.FAILURE:
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
self._passed = False
def _cleanUp(self, result):
try:
clean = util._Janitor(self, result).postCaseCleanup()
if not clean:
self._passed = False
except:
result.addError(self, failure.Failure())
self._passed = False
for error in self._observer.getErrors():
result.addError(self, error)
self._passed = False
self.flushLoggedErrors()
self._removeObserver()
if self._passed:
result.addSuccess(self)
def _classCleanUp(self, result):
try:
util._Janitor(self, result).postClassCleanup()
except:
result.addError(self, failure.Failure())
def _makeReactorMethod(self, name):
"""
Create a method which wraps the reactor method C{name}. The new
method issues a deprecation warning and calls the original.
"""
def _(*a, **kw):
warnings.warn("reactor.%s cannot be used inside unit tests. "
"In the future, using %s will fail the test and may "
"crash or hang the test run."
% (name, name),
stacklevel=2, category=DeprecationWarning)
return self._reactorMethods[name](*a, **kw)
return _
def _deprecateReactor(self, reactor):
"""
Deprecate C{iterate}, C{crash} and C{stop} on C{reactor}. That is,
each method is wrapped in a function that issues a deprecation
warning, then calls the original.
@param reactor: The Twisted reactor.
"""
self._reactorMethods = {}
for name in ['crash', 'iterate', 'stop']:
self._reactorMethods[name] = getattr(reactor, name)
setattr(reactor, name, self._makeReactorMethod(name))
def _undeprecateReactor(self, reactor):
"""
Restore the deprecated reactor methods. Undoes what
L{_deprecateReactor} did.
@param reactor: The Twisted reactor.
"""
for name, method in self._reactorMethods.iteritems():
setattr(reactor, name, method)
self._reactorMethods = {}
def _installObserver(self):
self._observer = _logObserver
self._observer._add()
def _removeObserver(self):
self._observer._remove()
def flushLoggedErrors(self, *errorTypes):
"""
Remove stored errors received from the log.
C{TestCase} stores each error logged during the run of the test and
reports them as errors during the cleanup phase (after C{tearDown}).
        @param *errorTypes: If unspecified, flush all errors. Otherwise, only
flush errors that match the given types.
@return: A list of failures that have been removed.
"""
return self._observer.flushErrors(*errorTypes)
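    # Usage sketch (assumed scenario): a test that deliberately logs one
    # ZeroDivisionError can flush it so the cleanup phase does not report
    # the logged error as a test error.
    #
    #     logged = self.flushLoggedErrors(ZeroDivisionError)
    #     self.assertEqual(len(logged), 1)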
def flushWarnings(self, offendingFunctions=None):
"""
Remove stored warnings from the list of captured warnings and return
them.
@param offendingFunctions: If C{None}, all warnings issued during the
currently running test will be flushed. Otherwise, only warnings
which I{point} to a function included in this list will be flushed.
All warnings include a filename and source line number; if these
parts of a warning point to a source line which is part of a
function, then the warning I{points} to that function.
@type offendingFunctions: L{NoneType} or L{list} of functions or methods.
@raise ValueError: If C{offendingFunctions} is not C{None} and includes
an object which is not a L{FunctionType} or L{MethodType} instance.
@return: A C{list}, each element of which is a C{dict} giving
information about one warning which was flushed by this call. The
keys of each C{dict} are:
- C{'message'}: The string which was passed as the I{message}
parameter to L{warnings.warn}.
- C{'category'}: The warning subclass which was passed as the
I{category} parameter to L{warnings.warn}.
- C{'filename'}: The name of the file containing the definition
of the code object which was C{stacklevel} frames above the
call to L{warnings.warn}, where C{stacklevel} is the value of
the C{stacklevel} parameter passed to L{warnings.warn}.
- C{'lineno'}: The source line associated with the active
              instruction of the code object which was C{stacklevel}
frames above the call to L{warnings.warn}, where
C{stacklevel} is the value of the C{stacklevel} parameter
passed to L{warnings.warn}.
"""
if offendingFunctions is None:
toFlush = self._warnings[:]
self._warnings[:] = []
else:
toFlush = []
for aWarning in self._warnings:
for aFunction in offendingFunctions:
if not isinstance(aFunction, (
types.FunctionType, types.MethodType)):
raise ValueError("%r is not a function or method" % (
aFunction,))
# inspect.getabsfile(aFunction) sometimes returns a
# filename which disagrees with the filename the warning
# system generates. This seems to be because a
# function's code object doesn't deal with source files
# being renamed. inspect.getabsfile(module) seems
# better (or at least agrees with the warning system
# more often), and does some normalization for us which
# is desirable. inspect.getmodule() is attractive, but
# somewhat broken in Python 2.3. See Python bug 4845.
aModule = sys.modules[aFunction.__module__]
filename = inspect.getabsfile(aModule)
if filename != os.path.normcase(aWarning.filename):
continue
lineStarts = list(_findlinestarts(aFunction.func_code))
first = lineStarts[0][1]
last = lineStarts[-1][1]
if not (first <= aWarning.lineno <= last):
continue
# The warning points to this function, flush it and move on
# to the next warning.
toFlush.append(aWarning)
break
# Remove everything which is being flushed.
map(self._warnings.remove, toFlush)
return [
{'message': w.message, 'category': w.category,
'filename': w.filename, 'lineno': w.lineno}
for w in toFlush]
def addCleanup(self, f, *args, **kwargs):
"""
Add the given function to a list of functions to be called after the
test has run, but before C{tearDown}.
Functions will be run in reverse order of being added. This helps
ensure that tear down complements set up.
The function C{f} may return a Deferred. If so, C{TestCase} will wait
until the Deferred has fired before proceeding to the next function.
"""
self._cleanups.append((f, args, kwargs))
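    # Usage sketch (connectToSomething is an assumed helper): register the
    # teardown right next to the setup that needs it; cleanups run in reverse
    # order after the test method and before tearDown.
    #
    #     def setUp(self):
    #         self.conn = connectToSomething()
    #         self.addCleanup(self.conn.close)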
def callDeprecated(self, version, f, *args, **kwargs):
"""
Call a function that was deprecated at a specific version.
@param version: The version that the function was deprecated in.
@param f: The deprecated function to call.
@return: Whatever the function returns.
"""
result = f(*args, **kwargs)
warningsShown = self.flushWarnings([self.callDeprecated])
if len(warningsShown) == 0:
self.fail('%r is not deprecated.' % (f,))
observedWarning = warningsShown[0]['message']
expectedWarning = getDeprecationWarningString(f, version)
self.assertEqual(expectedWarning, observedWarning)
return result
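    # Usage sketch (oldAPI is an assumed deprecated callable): the call is
    # forwarded, the deprecation warning for the given version is asserted,
    # and the callable's result is returned.
    #
    #     from twisted.python.versions import Version
    #     result = self.callDeprecated(Version("Twisted", 8, 0, 0), oldAPI)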
def _runCleanups(self):
"""
Run the cleanups added with L{addCleanup} in order.
@return: A C{Deferred} that fires when all cleanups are run.
"""
def _makeFunction(f, args, kwargs):
return lambda: f(*args, **kwargs)
callables = []
while len(self._cleanups) > 0:
f, args, kwargs = self._cleanups.pop()
callables.append(_makeFunction(f, args, kwargs))
return util._runSequentially(callables)
def patch(self, obj, attribute, value):
"""
Monkey patch an object for the duration of the test.
The monkey patch will be reverted at the end of the test using the
L{addCleanup} mechanism.
The L{MonkeyPatcher} is returned so that users can restore and
re-apply the monkey patch within their tests.
@param obj: The object to monkey patch.
@param attribute: The name of the attribute to change.
@param value: The value to set the attribute to.
@return: A L{monkey.MonkeyPatcher} object.
"""
monkeyPatch = monkey.MonkeyPatcher((obj, attribute, value))
monkeyPatch.patch()
self.addCleanup(monkeyPatch.restore)
return monkeyPatch
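    # Usage sketch (someModule and CONNECT_TIMEOUT are assumptions): the
    # attribute is swapped for the duration of the test and restored via the
    # addCleanup mechanism.
    #
    #     self.patch(someModule, 'CONNECT_TIMEOUT', 1)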
def runTest(self):
"""
If no C{methodName} argument is passed to the constructor, L{run} will
treat this method as the thing with the actual test inside.
"""
def run(self, result):
"""
Run the test case, storing the results in C{result}.
First runs C{setUp} on self, then runs the test method (defined in the
constructor), then runs C{tearDown}. Any of these may return
L{Deferred}s. After they complete, does some reactor cleanup.
@param result: A L{TestResult} object.
"""
log.msg("--> %s <--" % (self.id()))
from twisted.internet import reactor
new_result = itrial.IReporter(result, None)
if new_result is None:
result = PyUnitResultAdapter(result)
else:
result = new_result
self._timedOut = False
result.startTest(self)
if self.getSkip(): # don't run test methods that are marked as .skip
result.addSkip(self, self.getSkip())
result.stopTest(self)
return
self._installObserver()
# All the code inside runThunk will be run such that warnings emitted
# by it will be collected and retrievable by flushWarnings.
def runThunk():
self._passed = False
self._deprecateReactor(reactor)
try:
d = self.deferSetUp(None, result)
try:
self._wait(d)
finally:
self._cleanUp(result)
self._classCleanUp(result)
finally:
self._undeprecateReactor(reactor)
self._warnings = []
_collectWarnings(self._warnings.append, runThunk)
# Any collected warnings which the test method didn't flush get
# re-emitted so they'll be logged or show up on stdout or whatever.
for w in self.flushWarnings():
try:
warnings.warn_explicit(**w)
except:
result.addError(self, failure.Failure())
result.stopTest(self)
def _getReason(self, f):
if len(f.value.args) > 0:
reason = f.value.args[0]
else:
warnings.warn(("Do not raise unittest.SkipTest with no "
"arguments! Give a reason for skipping tests!"),
stacklevel=2)
reason = f
return reason
def getSkip(self):
"""
Return the skip reason set on this test, if any is set. Checks on the
instance first, then the class, then the module, then packages. As
soon as it finds something with a C{skip} attribute, returns that.
Returns C{None} if it cannot find anything. See L{TestCase} docstring
for more details.
"""
return util.acquireAttribute(self._parents, 'skip', None)
def getTodo(self):
"""
Return a L{Todo} object if the test is marked todo. Checks on the
instance first, then the class, then the module, then packages. As
soon as it finds something with a C{todo} attribute, returns that.
Returns C{None} if it cannot find anything. See L{TestCase} docstring
for more details.
"""
todo = util.acquireAttribute(self._parents, 'todo', None)
if todo is None:
return None
return makeTodo(todo)
def getTimeout(self):
"""
Returns the timeout value set on this test. Checks on the instance
first, then the class, then the module, then packages. As soon as it
finds something with a C{timeout} attribute, returns that. Returns
L{util.DEFAULT_TIMEOUT_DURATION} if it cannot find anything. See
L{TestCase} docstring for more details.
"""
timeout = util.acquireAttribute(self._parents, 'timeout',
util.DEFAULT_TIMEOUT_DURATION)
try:
return float(timeout)
except (ValueError, TypeError):
# XXX -- this is here because sometimes people will have methods
            # called 'timeout', or set timeout to 'orange', or something.
            # Particularly, test_news.NewsTestCase and ReactorCoreTestCase
# both do this.
warnings.warn("'timeout' attribute needs to be a number.",
category=DeprecationWarning)
return util.DEFAULT_TIMEOUT_DURATION
def getSuppress(self):
"""
Returns any warning suppressions set for this test. Checks on the
instance first, then the class, then the module, then packages. As
soon as it finds something with a C{suppress} attribute, returns that.
        Returns an empty list (i.e. suppress no warnings) if it cannot find
anything. See L{TestCase} docstring for more details.
"""
return util.acquireAttribute(self._parents, 'suppress', [])
def visit(self, visitor):
"""
Visit this test case. Call C{visitor} with C{self} as a parameter.
Deprecated in Twisted 8.0.
@param visitor: A callable which expects a single parameter: a test
case.
@return: None
"""
warnings.warn("Test visitors deprecated in Twisted 8.0",
category=DeprecationWarning)
visitor(self)
def mktemp(self):
"""Returns a unique name that may be used as either a temporary
directory or filename.
@note: you must call os.mkdir on the value returned from this
method if you wish to use it as a directory!
"""
MAX_FILENAME = 32 # some platforms limit lengths of filenames
base = os.path.join(self.__class__.__module__[:MAX_FILENAME],
self.__class__.__name__[:MAX_FILENAME],
self._testMethodName[:MAX_FILENAME])
if not os.path.exists(base):
os.makedirs(base)
dirname = tempfile.mkdtemp('', '', base)
return os.path.join(dirname, 'temp')
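    # Usage sketch: mktemp only returns a unique path; the caller creates the
    # file or directory.
    #
    #     path = self.mktemp()
    #     os.mkdir(path)    # only when a directory is wanted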
def _wait(self, d, running=_wait_is_running):
"""Take a Deferred that only ever callbacks. Block until it happens.
"""
from twisted.internet import reactor
if running:
raise RuntimeError("_wait is not reentrant")
results = []
def append(any):
if results is not None:
results.append(any)
def crash(ign):
if results is not None:
reactor.crash()
crash = utils.suppressWarnings(
crash, util.suppress(message=r'reactor\.crash cannot be used.*',
category=DeprecationWarning))
def stop():
reactor.crash()
stop = utils.suppressWarnings(
stop, util.suppress(message=r'reactor\.crash cannot be used.*',
category=DeprecationWarning))
running.append(None)
try:
d.addBoth(append)
if results:
# d might have already been fired, in which case append is
# called synchronously. Avoid any reactor stuff.
return
d.addBoth(crash)
reactor.stop = stop
try:
reactor.run()
finally:
del reactor.stop
# If the reactor was crashed elsewhere due to a timeout, hopefully
# that crasher also reported an error. Just return.
# _timedOut is most likely to be set when d has fired but hasn't
# completed its callback chain (see self._run)
if results or self._timedOut: #defined in run() and _run()
return
# If the timeout didn't happen, and we didn't get a result or
# a failure, then the user probably aborted the test, so let's
# just raise KeyboardInterrupt.
# FIXME: imagine this:
# web/test/test_webclient.py:
# exc = self.assertRaises(error.Error, wait, method(url))
#
# wait() will raise KeyboardInterrupt, and assertRaises will
# swallow it. Therefore, wait() raising KeyboardInterrupt is
# insufficient to stop trial. A suggested solution is to have
# this code set a "stop trial" flag, or otherwise notify trial
# that it should really try to stop as soon as possible.
raise KeyboardInterrupt()
finally:
results = None
running.pop()
class UnsupportedTrialFeature(Exception):
"""A feature of twisted.trial was used that pyunit cannot support."""
class PyUnitResultAdapter(object):
"""
Wrap a C{TestResult} from the standard library's C{unittest} so that it
supports the extended result types from Trial, and also supports
L{twisted.python.failure.Failure}s being passed to L{addError} and
L{addFailure}.
"""
def __init__(self, original):
"""
@param original: A C{TestResult} instance from C{unittest}.
"""
self.original = original
def _exc_info(self, err):
return util.excInfoOrFailureToExcInfo(err)
def startTest(self, method):
self.original.startTest(method)
def stopTest(self, method):
self.original.stopTest(method)
def addFailure(self, test, fail):
self.original.addFailure(test, self._exc_info(fail))
def addError(self, test, error):
self.original.addError(test, self._exc_info(error))
def _unsupported(self, test, feature, info):
self.original.addFailure(
test,
(UnsupportedTrialFeature,
UnsupportedTrialFeature(feature, info),
None))
def addSkip(self, test, reason):
"""
Report the skip as a failure.
"""
self._unsupported(test, 'skip', reason)
def addUnexpectedSuccess(self, test, todo):
"""
Report the unexpected success as a failure.
"""
self._unsupported(test, 'unexpected success', todo)
def addExpectedFailure(self, test, error):
"""
Report the expected failure (i.e. todo) as a failure.
"""
self._unsupported(test, 'expected failure', error)
def addSuccess(self, test):
self.original.addSuccess(test)
def upDownError(self, method, error, warn, printStatus):
pass
def suiteVisit(suite, visitor):
"""
Visit each test in C{suite} with C{visitor}.
Deprecated in Twisted 8.0.
@param visitor: A callable which takes a single argument, the L{TestCase}
instance to visit.
@return: None
"""
warnings.warn("Test visitors deprecated in Twisted 8.0",
category=DeprecationWarning)
for case in suite._tests:
visit = getattr(case, 'visit', None)
if visit is not None:
visit(visitor)
elif isinstance(case, pyunit.TestCase):
case = itrial.ITestCase(case)
case.visit(visitor)
elif isinstance(case, pyunit.TestSuite):
suiteVisit(case, visitor)
else:
case.visit(visitor)
class TestSuite(pyunit.TestSuite):
"""
Extend the standard library's C{TestSuite} with support for the visitor
pattern and a consistently overrideable C{run} method.
"""
visit = suiteVisit
def __call__(self, result):
return self.run(result)
def run(self, result):
"""
Call C{run} on every member of the suite.
"""
# we implement this because Python 2.3 unittest defines this code
# in __call__, whereas 2.4 defines the code in run.
for test in self._tests:
if result.shouldStop:
break
test(result)
return result
class TestDecorator(components.proxyForInterface(itrial.ITestCase,
"_originalTest")):
"""
Decorator for test cases.
@param _originalTest: The wrapped instance of test.
@type _originalTest: A provider of L{itrial.ITestCase}
"""
implements(itrial.ITestCase)
def __call__(self, result):
"""
Run the unit test.
@param result: A TestResult object.
"""
return self.run(result)
def run(self, result):
"""
Run the unit test.
@param result: A TestResult object.
"""
return self._originalTest.run(
reporter._AdaptedReporter(result, self.__class__))
def _clearSuite(suite):
"""
Clear all tests from C{suite}.
This messes with the internals of C{suite}. In particular, it assumes that
the suite keeps all of its tests in a list in an instance variable called
C{_tests}.
"""
suite._tests = []
def decorate(test, decorator):
"""
Decorate all test cases in C{test} with C{decorator}.
C{test} can be a test case or a test suite. If it is a test suite, then the
structure of the suite is preserved.
L{decorate} tries to preserve the class of the test suites it finds, but
assumes the presence of the C{_tests} attribute on the suite.
@param test: The C{TestCase} or C{TestSuite} to decorate.
@param decorator: A unary callable used to decorate C{TestCase}s.
@return: A decorated C{TestCase} or a C{TestSuite} containing decorated
C{TestCase}s.
"""
try:
tests = iter(test)
except TypeError:
return decorator(test)
# At this point, we know that 'test' is a test suite.
_clearSuite(test)
for case in tests:
test.addTest(decorate(case, decorator))
return test
class _PyUnitTestCaseAdapter(TestDecorator):
"""
Adapt from pyunit.TestCase to ITestCase.
"""
def visit(self, visitor):
"""
Deprecated in Twisted 8.0.
"""
warnings.warn("Test visitors deprecated in Twisted 8.0",
category=DeprecationWarning)
visitor(self)
class _BrokenIDTestCaseAdapter(_PyUnitTestCaseAdapter):
"""
Adapter for pyunit-style C{TestCase} subclasses that have undesirable id()
methods. That is L{pyunit.FunctionTestCase} and L{pyunit.DocTestCase}.
"""
def id(self):
"""
Return the fully-qualified Python name of the doctest.
"""
testID = self._originalTest.shortDescription()
if testID is not None:
return testID
return self._originalTest.id()
class _ForceGarbageCollectionDecorator(TestDecorator):
"""
Forces garbage collection to be run before and after the test. Any errors
logged during the post-test collection are added to the test result as
errors.
"""
def run(self, result):
gc.collect()
TestDecorator.run(self, result)
_logObserver._add()
gc.collect()
for error in _logObserver.getErrors():
result.addError(self, error)
_logObserver.flushErrors()
_logObserver._remove()
components.registerAdapter(
_PyUnitTestCaseAdapter, pyunit.TestCase, itrial.ITestCase)
components.registerAdapter(
_BrokenIDTestCaseAdapter, pyunit.FunctionTestCase, itrial.ITestCase)
_docTestCase = getattr(doctest, 'DocTestCase', None)
if _docTestCase:
components.registerAdapter(
_BrokenIDTestCaseAdapter, _docTestCase, itrial.ITestCase)
def _iterateTests(testSuiteOrCase):
"""
Iterate through all of the test cases in C{testSuiteOrCase}.
"""
try:
suite = iter(testSuiteOrCase)
except TypeError:
yield testSuiteOrCase
else:
for test in suite:
for subtest in _iterateTests(test):
yield subtest
# Support for Python 2.3
try:
iter(pyunit.TestSuite())
except TypeError:
# Python 2.3's TestSuite doesn't support iteration. Let's monkey patch it!
def __iter__(self):
return iter(self._tests)
pyunit.TestSuite.__iter__ = __iter__
class _SubTestCase(TestCase):
def __init__(self):
TestCase.__init__(self, 'run')
_inst = _SubTestCase()
def _deprecate(name):
"""
Internal method used to deprecate top-level assertions. Do not use this.
"""
def _(*args, **kwargs):
warnings.warn("unittest.%s is deprecated. Instead use the %r "
"method on unittest.TestCase" % (name, name),
stacklevel=2, category=DeprecationWarning)
return getattr(_inst, name)(*args, **kwargs)
return _
_assertions = ['fail', 'failUnlessEqual', 'failIfEqual', 'failIfEquals',
'failUnless', 'failUnlessIdentical', 'failUnlessIn',
'failIfIdentical', 'failIfIn', 'failIf',
'failUnlessAlmostEqual', 'failIfAlmostEqual',
'failUnlessRaises', 'assertApproximates',
'assertFailure', 'failUnlessSubstring', 'failIfSubstring',
'assertAlmostEqual', 'assertAlmostEquals',
'assertNotAlmostEqual', 'assertNotAlmostEquals', 'assertEqual',
'assertEquals', 'assertNotEqual', 'assertNotEquals',
'assertRaises', 'assert_', 'assertIdentical',
'assertNotIdentical', 'assertIn', 'assertNotIn',
'failUnlessFailure', 'assertSubstring', 'assertNotSubstring']
for methodName in _assertions:
globals()[methodName] = _deprecate(methodName)
__all__ = ['TestCase', 'FailTest', 'SkipTest']
|
apache-2.0
|