repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
nirvn/QGIS | tests/src/python/test_qgsvectorlayercache.py | 22 | 3911 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorLayerCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '08/06/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QVariant, Qt
from qgis.PyQt.QtGui import QPainter
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsWkbTypes,
QgsVectorLayer,
QgsVectorLayerCache,
QgsRectangle,
QgsFeature,
QgsFeatureRequest,
QgsGeometry,
QgsPointXY,
QgsField,
QgsFields,
QgsCoordinateReferenceSystem,
QgsProject,
QgsPoint,
NULL)
from qgis.testing import start_app, unittest
from featuresourcetestbase import FeatureSourceTestCase
from utilities import unitTestDataPath
start_app()
class TestQgsVectorLayerCache(unittest.TestCase, FeatureSourceTestCase):

    @classmethod
    def getSource(cls):
        """Return a fresh QgsVectorLayerCache wrapping the shared test layer."""
        return QgsVectorLayerCache(cls.vl, 100)

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Create test layer for FeatureSourceTestCase
        cls.vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
            'test', 'memory')
        assert cls.vl.isValid()

        # (feature id, attribute values, WKT geometry or None for no geometry)
        rows = [
            (5, [5, -200, NULL, 'NuLl', '5'], 'Point (-71.123 78.23)'),
            (3, [3, 300, 'Pear', 'PEaR', '3'], None),
            (1, [1, 100, 'Orange', 'oranGe', '1'], 'Point (-70.332 66.33)'),
            (2, [2, 200, 'Apple', 'Apple', '2'], 'Point (-68.2 70.8)'),
            (4, [4, 400, 'Honey', 'Honey', '4'], 'Point (-65.32 78.3)'),
        ]
        features = []
        for fid, attributes, wkt in rows:
            feature = QgsFeature(fid)
            feature.setAttributes(attributes)
            if wkt is not None:
                feature.setGeometry(QgsGeometry.fromWkt(wkt))
            features.append(feature)
        assert cls.vl.dataProvider().addFeatures(features)
        cls.source = QgsVectorLayerCache(cls.vl, 100)

    def testGetFeaturesSubsetAttributes2(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testGetFeaturesNoGeometry(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testUniqueValues(self):
        """ Skip unique values test - not implemented by the cache (yet)
        """
        pass

    def testMinimumValue(self):
        """ Skip min values test - not implemented by the cache (yet)
        """
        pass

    def testMaximumValue(self):
        """ Skip max values test - not implemented by the cache (yet)
        """
        pass

    def testAllFeatureIds(self):
        """ Skip allFeatureIds test - not implemented by the cache (yet)
        """
        pass
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| gpl-2.0 |
esakellari/root | interpreter/llvm/src/tools/clang/www/builtins.py | 99 | 6633 | #!/usr/bin/env python
import sys, fileinput
# Exit status: stays 0 on success; kept for the final sys.exit(err).
err = 0

# Giant associative set of builtin->intrinsic mappings where clang doesn't
# implement the builtin since the vector operation works by default.
repl_map = {
    '__builtin_ia32_addps': '_mm_add_ps',
    '__builtin_ia32_addsd': '_mm_add_sd',
    '__builtin_ia32_addpd': '_mm_add_pd',
    '__builtin_ia32_addss': '_mm_add_ss',
    '__builtin_ia32_paddb128': '_mm_add_epi8',
    '__builtin_ia32_paddw128': '_mm_add_epi16',
    '__builtin_ia32_paddd128': '_mm_add_epi32',
    '__builtin_ia32_paddq128': '_mm_add_epi64',
    '__builtin_ia32_subps': '_mm_sub_ps',
    '__builtin_ia32_subsd': '_mm_sub_sd',
    '__builtin_ia32_subpd': '_mm_sub_pd',
    '__builtin_ia32_subss': '_mm_sub_ss',
    '__builtin_ia32_psubb128': '_mm_sub_epi8',
    '__builtin_ia32_psubw128': '_mm_sub_epi16',
    '__builtin_ia32_psubd128': '_mm_sub_epi32',
    '__builtin_ia32_psubq128': '_mm_sub_epi64',
    '__builtin_ia32_mulsd': '_mm_mul_sd',
    '__builtin_ia32_mulpd': '_mm_mul_pd',
    '__builtin_ia32_mulps': '_mm_mul_ps',
    '__builtin_ia32_mulss': '_mm_mul_ss',
    '__builtin_ia32_pmullw128': '_mm_mullo_epi16',
    '__builtin_ia32_divsd': '_mm_div_sd',
    '__builtin_ia32_divpd': '_mm_div_pd',
    '__builtin_ia32_divps': '_mm_div_ps',
    # BUG FIX: this key was previously '__builtin_ia32_subss', duplicating
    # the entry above and (since the later literal wins) silently mapping
    # subss -> _mm_div_ss while divss was never translated at all.
    '__builtin_ia32_divss': '_mm_div_ss',
    '__builtin_ia32_andpd': '_mm_and_pd',
    '__builtin_ia32_andps': '_mm_and_ps',
    '__builtin_ia32_pand128': '_mm_and_si128',
    '__builtin_ia32_andnpd': '_mm_andnot_pd',
    '__builtin_ia32_andnps': '_mm_andnot_ps',
    '__builtin_ia32_pandn128': '_mm_andnot_si128',
    '__builtin_ia32_orpd': '_mm_or_pd',
    '__builtin_ia32_orps': '_mm_or_ps',
    '__builtin_ia32_por128': '_mm_or_si128',
    '__builtin_ia32_xorpd': '_mm_xor_pd',
    '__builtin_ia32_xorps': '_mm_xor_ps',
    '__builtin_ia32_pxor128': '_mm_xor_si128',
    '__builtin_ia32_cvtps2dq': '_mm_cvtps_epi32',
    '__builtin_ia32_cvtsd2ss': '_mm_cvtsd_ss',
    '__builtin_ia32_cvtsi2sd': '_mm_cvtsi32_sd',
    '__builtin_ia32_cvtss2sd': '_mm_cvtss_sd',
    '__builtin_ia32_cvttsd2si': '_mm_cvttsd_si32',
    '__builtin_ia32_vec_ext_v2df': '_mm_cvtsd_f64',
    '__builtin_ia32_loadhpd': '_mm_loadh_pd',
    '__builtin_ia32_loadlpd': '_mm_loadl_pd',
    '__builtin_ia32_loadlv4si': '_mm_loadl_epi64',
    '__builtin_ia32_cmpeqps': '_mm_cmpeq_ps',
    '__builtin_ia32_cmpltps': '_mm_cmplt_ps',
    '__builtin_ia32_cmpleps': '_mm_cmple_ps',
    '__builtin_ia32_cmpgtps': '_mm_cmpgt_ps',
    '__builtin_ia32_cmpgeps': '_mm_cmpge_ps',
    '__builtin_ia32_cmpunordps': '_mm_cmpunord_ps',
    '__builtin_ia32_cmpneqps': '_mm_cmpneq_ps',
    '__builtin_ia32_cmpnltps': '_mm_cmpnlt_ps',
    '__builtin_ia32_cmpnleps': '_mm_cmpnle_ps',
    '__builtin_ia32_cmpngtps': '_mm_cmpngt_ps',
    '__builtin_ia32_cmpordps': '_mm_cmpord_ps',
    '__builtin_ia32_cmpeqss': '_mm_cmpeq_ss',
    '__builtin_ia32_cmpltss': '_mm_cmplt_ss',
    '__builtin_ia32_cmpless': '_mm_cmple_ss',
    '__builtin_ia32_cmpunordss': '_mm_cmpunord_ss',
    '__builtin_ia32_cmpneqss': '_mm_cmpneq_ss',
    '__builtin_ia32_cmpnltss': '_mm_cmpnlt_ss',
    '__builtin_ia32_cmpnless': '_mm_cmpnle_ss',
    '__builtin_ia32_cmpngtss': '_mm_cmpngt_ss',
    '__builtin_ia32_cmpngess': '_mm_cmpnge_ss',
    '__builtin_ia32_cmpordss': '_mm_cmpord_ss',
    '__builtin_ia32_movss': '_mm_move_ss',
    '__builtin_ia32_movsd': '_mm_move_sd',
    '__builtin_ia32_movhlps': '_mm_movehl_ps',
    '__builtin_ia32_movlhps': '_mm_movelh_ps',
    '__builtin_ia32_movqv4si': '_mm_move_epi64',
    '__builtin_ia32_unpckhps': '_mm_unpackhi_ps',
    '__builtin_ia32_unpckhpd': '_mm_unpackhi_pd',
    '__builtin_ia32_punpckhbw128': '_mm_unpackhi_epi8',
    '__builtin_ia32_punpckhwd128': '_mm_unpackhi_epi16',
    '__builtin_ia32_punpckhdq128': '_mm_unpackhi_epi32',
    '__builtin_ia32_punpckhqdq128': '_mm_unpackhi_epi64',
    '__builtin_ia32_unpcklps': '_mm_unpacklo_ps',
    '__builtin_ia32_unpcklpd': '_mm_unpacklo_pd',
    '__builtin_ia32_punpcklbw128': '_mm_unpacklo_epi8',
    '__builtin_ia32_punpcklwd128': '_mm_unpacklo_epi16',
    '__builtin_ia32_punpckldq128': '_mm_unpacklo_epi32',
    '__builtin_ia32_punpcklqdq128': '_mm_unpacklo_epi64',
    '__builtin_ia32_cmpeqpd': '_mm_cmpeq_pd',
    '__builtin_ia32_cmpltpd': '_mm_cmplt_pd',
    '__builtin_ia32_cmplepd': '_mm_cmple_pd',
    '__builtin_ia32_cmpgtpd': '_mm_cmpgt_pd',
    '__builtin_ia32_cmpgepd': '_mm_cmpge_pd',
    '__builtin_ia32_cmpunordpd': '_mm_cmpunord_pd',
    '__builtin_ia32_cmpneqpd': '_mm_cmpneq_pd',
    '__builtin_ia32_cmpnltpd': '_mm_cmpnlt_pd',
    '__builtin_ia32_cmpnlepd': '_mm_cmpnle_pd',
    '__builtin_ia32_cmpngtpd': '_mm_cmpngt_pd',
    '__builtin_ia32_cmpngepd': '_mm_cmpnge_pd',
    '__builtin_ia32_cmpordpd': '_mm_cmpord_pd',
    '__builtin_ia32_cmpeqsd': '_mm_cmpeq_sd',
    '__builtin_ia32_cmpltsd': '_mm_cmplt_sd',
    '__builtin_ia32_cmplesd': '_mm_cmple_sd',
    '__builtin_ia32_cmpunordsd': '_mm_cmpunord_sd',
    '__builtin_ia32_cmpneqsd': '_mm_cmpneq_sd',
    '__builtin_ia32_cmpnltsd': '_mm_cmpnlt_sd',
    '__builtin_ia32_cmpnlesd': '_mm_cmpnle_sd',
    '__builtin_ia32_cmpordsd': '_mm_cmpord_sd',
    '__builtin_ia32_cvtsi642ss': '_mm_cvtsi64_ss',
    '__builtin_ia32_cvttss2si64': '_mm_cvtss_si64',
    '__builtin_ia32_shufps': '_mm_shuffle_ps',
    '__builtin_ia32_shufpd': '_mm_shuffle_pd',
    '__builtin_ia32_pshufhw': '_mm_shufflehi_epi16',
    '__builtin_ia32_pshuflw': '_mm_shufflelo_epi16',
    '__builtin_ia32_pshufd': '_mm_shuffle_epi32',
    '__builtin_ia32_movshdup': '_mm_movehdup_ps',
    '__builtin_ia32_movsldup': '_mm_moveldup_ps',
    '__builtin_ia32_maxps': '_mm_max_ps',
    '__builtin_ia32_pslldi128': '_mm_slli_epi32',
    '__builtin_ia32_vec_set_v16qi': '_mm_insert_epi8',
    '__builtin_ia32_vec_set_v8hi': '_mm_insert_epi16',
    '__builtin_ia32_vec_set_v4si': '_mm_insert_epi32',
    '__builtin_ia32_vec_set_v2di': '_mm_insert_epi64',
    '__builtin_ia32_vec_set_v4hi': '_mm_insert_pi16',
    '__builtin_ia32_vec_ext_v16qi': '_mm_extract_epi8',
    '__builtin_ia32_vec_ext_v8hi': '_mm_extract_epi16',
    '__builtin_ia32_vec_ext_v4si': '_mm_extract_epi32',
    '__builtin_ia32_vec_ext_v2di': '_mm_extract_epi64',
    '__builtin_ia32_vec_ext_v4hi': '_mm_extract_pi16',
    '__builtin_ia32_vec_ext_v4sf': '_mm_extract_ps'
}
# Builtins with no one-to-one intrinsic replacement.
# __builtin_ia32_vec_ext_*(__P, idx) maps to _mm_store_sd or _mm_storeh_pd
# depending on the index, so there is no abstract insert/extract for these;
# occurrences are only reported, never rewritten.
unhandled = [
    '__builtin_ia32_vec_ext_v2df',
    '__builtin_ia32_vec_ext_v2si',
]
def report_repl(builtin, repl):
    """Log to stderr that *builtin* was replaced with *repl* at the current fileinput position."""
    message = "%s:%d: x86 builtin %s used, replaced with %s\n" % (
        fileinput.filename(), fileinput.filelineno(), builtin, repl)
    sys.stderr.write(message)
def report_cant(builtin):
    """Log to stderr that *builtin* has no automatic replacement at the current fileinput position."""
    message = "%s:%d: x86 builtin %s used, too many replacements\n" % (
        fileinput.filename(), fileinput.filelineno(), builtin)
    sys.stderr.write(message)
# Rewrite every input file in place (inplace mode redirects stdout back into
# the file): substitute each known builtin with its intrinsic equivalent and
# report any occurrence of the unhandled builtins.
for line in fileinput.input(inplace=1):
    # BUG FIX: was repl_map.iteritems(), which is Python-2-only and raises
    # AttributeError under Python 3; dict.items() works on both.
    for builtin, repl in repl_map.items():
        if builtin in line:
            line = line.replace(builtin, repl)
            report_repl(builtin, repl)
    for unh in unhandled:
        if unh in line:
            report_cant(unh)
    sys.stdout.write(line)
sys.exit(err)
| lgpl-2.1 |
hamiltont/CouchPotatoServer | couchpotato/core/notifications/email/__init__.py | 7 | 1913 | from .main import Email
def start():
    """Plugin entry point: build and return the Email notifier instance."""
    notifier = Email()
    return notifier
# Declarative settings schema consumed by CouchPotato's config system:
# registers the "email" notifier's options on the Notifications settings tab.
config = [{
    "name": "email",
    "groups": [
        {
            "tab": "notifications",
            "list": "notification_providers",
            "name": "email",
            "options": [
                {
                    # Master on/off toggle for the whole provider.
                    "name": "enabled",
                    "default": 0,
                    "type": "enabler",
                },
                {
                    "name": "from",
                    "label": "Send e-mail from",
                },
                {
                    "name": "to",
                    "label": "Send e-mail to",
                },
                {
                    "name": "smtp_server",
                    "label": "SMTP server",
                },
                {
                    "name": "smtp_port",
                    "label": "SMTP server port",
                    # Kept as a string default; the config layer coerces via type.
                    "default": "25",
                    "type": "int",
                },
                {
                    "name": "ssl",
                    "label": "Enable SSL",
                    "default": 0,
                    "type": "bool",
                },
                {
                    "name": "starttls",
                    "label": "Enable StartTLS",
                    "default": 0,
                    "type": "bool",
                },
                {
                    "name": "smtp_user",
                    "label": "SMTP user",
                },
                {
                    # Rendered masked in the UI.
                    "name": "smtp_pass",
                    "label": "SMTP password",
                    "type": "password",
                },
                {
                    "name": "on_snatch",
                    "default": 0,
                    "type": "bool",
                    "advanced": True,
                    "description": "Also send message when movie is snatched.",
                },
            ],
        }
    ],
}]
| gpl-3.0 |
MartinEnder/erpnext-de | erpnext/setup/setup_wizard/test_setup_data.py | 52 | 159337 | from __future__ import unicode_literals
args = {
"attach_letterhead": "erpnext.jpg,data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEASABIAAD/4gxYSUNDX1BST0ZJTEUAAQEAAAxITGlubwIQAABtbnRyUkdCIFhZWiAHzgACAAkABgAxAABhY3NwTVNGVAAAAABJRUMgc1JHQgAAAAAAAAAAAAAAAAAA9tYAAQAAAADTLUhQICAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABFjcHJ0AAABUAAAADNkZXNjAAABhAAAAGx3dHB0AAAB8AAAABRia3B0AAACBAAAABRyWFlaAAACGAAAABRnWFlaAAACLAAAABRiWFlaAAACQAAAABRkbW5kAAACVAAAAHBkbWRkAAACxAAAAIh2dWVkAAADTAAAAIZ2aWV3AAAD1AAAACRsdW1pAAAD+AAAABRtZWFzAAAEDAAAACR0ZWNoAAAEMAAAAAxyVFJDAAAEPAAACAxnVFJDAAAEPAAACAxiVFJDAAAEPAAACAx0ZXh0AAAAAENvcHlyaWdodCAoYykgMTk5OCBIZXdsZXR0LVBhY2thcmQgQ29tcGFueQAAZGVzYwAAAAAAAAASc1JHQiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAABJzUkdCIElFQzYxOTY2LTIuMQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWFlaIAAAAAAAAPNRAAEAAAABFsxYWVogAAAAAAAAAAAAAAAAAAAAAFhZWiAAAAAAAABvogAAOPUAAAOQWFlaIAAAAAAAAGKZAAC3hQAAGNpYWVogAAAAAAAAJKAAAA+EAAC2z2Rlc2MAAAAAAAAAFklFQyBodHRwOi8vd3d3LmllYy5jaAAAAAAAAAAAAAAAFklFQyBodHRwOi8vd3d3LmllYy5jaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkZXNjAAAAAAAAAC5JRUMgNjE5NjYtMi4xIERlZmF1bHQgUkdCIGNvbG91ciBzcGFjZSAtIHNSR0IAAAAAAAAAAAAAAC5JRUMgNjE5NjYtMi4xIERlZmF1bHQgUkdCIGNvbG91ciBzcGFjZSAtIHNSR0IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZGVzYwAAAAAAAAAsUmVmZXJlbmNlIFZpZXdpbmcgQ29uZGl0aW9uIGluIElFQzYxOTY2LTIuMQAAAAAAAAAAAAAALFJlZmVyZW5jZSBWaWV3aW5nIENvbmRpdGlvbiBpbiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHZpZXcAAAAAABOk/gAUXy4AEM8UAAPtzAAEEwsAA1yeAAAAAVhZWiAAAAAAAEwJVgBQAAAAVx/nbWVhcwAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAo8AAAACc2lnIAAAAABDUlQgY3VydgAAAAAAAAQAAAAABQAKAA8AFAAZAB4AIwAoAC0AMgA3ADsAQABFAEoATwBUAFkAXgBjAGgAbQByAHcAfACBAIYAiwCQAJUAmgCfAKQAqQCuALIAtwC8AMEAxgDLANAA1QDbAOAA5QDrAPAA9gD7AQEBBwENARMBGQEfASUBKwEyATgBPgFFAUwBUgFZAWABZwFuAXUBfAGDAYsBkgGaAaEBqQGxAbkBwQHJAdEB2QHhAekB8gH6AgMCDAIUAh0CJgIvAjgCQQJLAlQCXQJnAnECegKEAo4CmAKiAqwCtgLBAssC1QLgAusC9QMAAwsDFgMhAy0DOANDA08DWgNmA3IDfgOKA5YDogOuA7oDxwPTA+AD7AP5BAYEEwQgBC0EOwRIBFUEYwRxBH4EjASaBKgEtgTEBNME4QTwBP4FDQUcBSsFOgVJBVgFZwV
3BYYFlgWmBbUFxQXVBeUF9gYGBhYGJwY3BkgGWQZqBnsGjAadBq8GwAbRBuMG9QcHBxkHKwc9B08HYQd0B4YHmQesB78H0gflB/gICwgfCDIIRghaCG4IggiWCKoIvgjSCOcI+wkQCSUJOglPCWQJeQmPCaQJugnPCeUJ+woRCicKPQpUCmoKgQqYCq4KxQrcCvMLCwsiCzkLUQtpC4ALmAuwC8gL4Qv5DBIMKgxDDFwMdQyODKcMwAzZDPMNDQ0mDUANWg10DY4NqQ3DDd4N+A4TDi4OSQ5kDn8Omw62DtIO7g8JDyUPQQ9eD3oPlg+zD88P7BAJECYQQxBhEH4QmxC5ENcQ9RETETERTxFtEYwRqhHJEegSBxImEkUSZBKEEqMSwxLjEwMTIxNDE2MTgxOkE8UT5RQGFCcUSRRqFIsUrRTOFPAVEhU0FVYVeBWbFb0V4BYDFiYWSRZsFo8WshbWFvoXHRdBF2UXiReuF9IX9xgbGEAYZRiKGK8Y1Rj6GSAZRRlrGZEZtxndGgQaKhpRGncanhrFGuwbFBs7G2MbihuyG9ocAhwqHFIcexyjHMwc9R0eHUcdcB2ZHcMd7B4WHkAeah6UHr4e6R8THz4faR+UH78f6iAVIEEgbCCYIMQg8CEcIUghdSGhIc4h+yInIlUigiKvIt0jCiM4I2YjlCPCI/AkHyRNJHwkqyTaJQklOCVoJZclxyX3JicmVyaHJrcm6CcYJ0kneierJ9woDSg/KHEooijUKQYpOClrKZ0p0CoCKjUqaCqbKs8rAis2K2krnSvRLAUsOSxuLKIs1y0MLUEtdi2rLeEuFi5MLoIuty7uLyQvWi+RL8cv/jA1MGwwpDDbMRIxSjGCMbox8jIqMmMymzLUMw0zRjN/M7gz8TQrNGU0njTYNRM1TTWHNcI1/TY3NnI2rjbpNyQ3YDecN9c4FDhQOIw4yDkFOUI5fzm8Ofk6Njp0OrI67zstO2s7qjvoPCc8ZTykPOM9Ij1hPaE94D4gPmA+oD7gPyE/YT+iP+JAI0BkQKZA50EpQWpBrEHuQjBCckK1QvdDOkN9Q8BEA0RHRIpEzkUSRVVFmkXeRiJGZ0arRvBHNUd7R8BIBUhLSJFI10kdSWNJqUnwSjdKfUrESwxLU0uaS+JMKkxyTLpNAk1KTZNN3E4lTm5Ot08AT0lPk0/dUCdQcVC7UQZRUFGbUeZSMVJ8UsdTE1NfU6pT9lRCVI9U21UoVXVVwlYPVlxWqVb3V0RXklfgWC9YfVjLWRpZaVm4WgdaVlqmWvVbRVuVW+VcNVyGXNZdJ114XcleGl5sXr1fD19hX7NgBWBXYKpg/GFPYaJh9WJJYpxi8GNDY5dj62RAZJRk6WU9ZZJl52Y9ZpJm6Gc9Z5Nn6Wg/aJZo7GlDaZpp8WpIap9q92tPa6dr/2xXbK9tCG1gbbluEm5rbsRvHm94b9FwK3CGcOBxOnGVcfByS3KmcwFzXXO4dBR0cHTMdSh1hXXhdj52m3b4d1Z3s3gReG54zHkqeYl553pGeqV7BHtje8J8IXyBfOF9QX2hfgF+Yn7CfyN/hH/lgEeAqIEKgWuBzYIwgpKC9INXg7qEHYSAhOOFR4Wrhg6GcobXhzuHn4gEiGmIzokziZmJ/opkisqLMIuWi/yMY4zKjTGNmI3/jmaOzo82j56QBpBukNaRP5GokhGSepLjk02TtpQglIqU9JVflcmWNJaflwqXdZfgmEyYuJkkmZCZ/JpomtWbQpuvnByciZz3nWSd0p5Anq6fHZ+Ln/qgaaDYoUehtqImopajBqN2o+akVqTHpTilqaYapoum/adup+CoUqjEqTepqaocqo+rAqt1q+msXKzQrUStuK4trqGvFq+LsACwdbDqsWCx1rJLssKzOLOutCW0nLUTtYq2AbZ5tvC3aLfguFm40blKucK6O7q1uy67p7whvJu9Fb2Pvgq+hL7/v3q/9cBwwOzBZ8Hjwl/C28NYw9TEUcT
OxUvFyMZGxsPHQce/yD3IvMk6ybnKOMq3yzbLtsw1zLXNNc21zjbOts83z7jQOdC60TzRvtI/0sHTRNPG1EnUy9VO1dHWVdbY11zX4Nhk2OjZbNnx2nba+9uA3AXcit0Q3ZbeHN6i3ynfr+A24L3hROHM4lPi2+Nj4+vkc+T85YTmDeaW5x/nqegy6LzpRunQ6lvq5etw6/vshu0R7ZzuKO6070DvzPBY8OXxcvH/8ozzGfOn9DT0wvVQ9d72bfb794r4Gfio+Tj5x/pX+uf7d/wH/Jj9Kf26/kv+3P9t////4QDKRXhpZgAATU0AKgAAAAgABwESAAMAAAABAAEAAAEaAAUAAAABAAAAYgEbAAUAAAABAAAAagEoAAMAAAABAAIAAAExAAIAAAARAAAAcgEyAAIAAAAUAAAAhIdpAAQAAAABAAAAmAAAAAAAAABIAAAAAQAAAEgAAAABUGl4ZWxtYXRvciAyLjIuMQAAMjAxMzowOToyNyAxODowOTo0OAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAEOqADAAQAAAABAAABrQAAAAD/4QJlaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA1LjEuMiI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyI+CiAgICAgICAgIDx4bXA6TW9kaWZ5RGF0ZT4yMDEzLTA5LTI3VDE4OjA5OjQ4PC94bXA6TW9kaWZ5RGF0ZT4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD5QaXhlbG1hdG9yIDIuMi4xPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iPgogICAgICAgICA8ZGM6c3ViamVjdD4KICAgICAgICAgICAgPHJkZjpCYWcvPgogICAgICAgICA8L2RjOnN1YmplY3Q+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgr/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAGtBDoDAREAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSM
zUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD+/igAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA
oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBGZVBZmCgAsxYgAKvLMSeAAOSTwO9AHxV4x/4KU/8E6Ph5rl74Y+IH7ff7FPgbxLps8ttqPh7xj+1R8C/DOuWFzA2ya3vdJ1rx3ZX9rPC/ySxT28ckbHa6g8UAcv/wAPYf8Agln/ANJK/wBgH/xMj9nX8f8Amo1ACf8AD2H/AIJZf9JLP2AP/EyP2de//dRu+f1oAX/h7D/wSz/6SV/sA/8AiZH7Ov8A88agA/4ew/8ABLP/AKSV/sA/+Jkfs7f/ADxqAD/h7D/wSz7/APBSv9gH/wATI/Z1/wDnjUAH/D2H/gln/wBJK/2AeOv/ABmR+zrx/wCZGoAP+HsP/BLP/pJX+wD/AOJkfs6//PGoA+hvg1+1N+zH+0al7J+z3+0b8B/jummxLcai/wAGvi98PviglhA7KqTXreCPEOuC1idnVVknKIzMoDEsMgHvHXmgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAQkKCx6AEn6Dk0Af5wP/B1n/wAFovjN4n/aI8a/8E0v2evHOs+APgv8I7LRtO/aI1jwfqt7pGs/GL4ka/olh4ivPAmra1YPbXx+HHgDSdW03StS8L2s9taeI/Gs2vt4ni1Wz0LQIbUA/iMbnOV/75VRnkf3QM/U89SeaAGjrnB6senXI/yPegAHB6HkL+HH+c/jQApJOcqeAR9ckd/8+tAC55xg9euPf/Jz/wDroARuexPH/sw4/HH5c80AGfvHDc+31/yT7/mAL3A56k5xx/F/n/8AXQB6F8Kfi18TfgZ4/wDDHxU+Dvjvxb8MviN4N1ODV/C/jbwNruoeGfE+h39vIHjnsNY0qa3uowwXy7i3leW0u4Ge2vLe4tpZYmAP9ef/AIN/v+CpWs/8FUP2FdM+JXxLTS7b9oX4P+Kp/g98dl0i1i03T/EviPTdG0zW/DvxM03R7eNLXSbP4heG9St77UdPsUt9MsvGGm+LLPRrKz0a3sLaIA/cmgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBCMgg9Dwfc
d/wA6AP8AFM/4LVzzXH/BWv8A4KLvPI8rr+2J8d4VZzkiK28a6hbW8YP92GCGKJB/CiKO1AH5gc4P178Dkr1B555+vP8AeoAQZz/30Oo3Hjue+P0oAXn+R65PQc8fr/eHSgA556dPoOvcdc46n8KADnrjv3Pqf5eg659aAA5//Wf9rv6jB/DJHegBOfm6Z4789O/9emRmgBecj6nvx1bt6/5NABzx069zk/e556env2oA/wBDj/gx5mmf4Yf8FErdpGMEPjv9mqeKInKRzT+GPi/FPIo7PLHbQI57rEg/hoA/u7oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP8AFF/4LS/8paf+CjPr/wANkfH3/wBTvVPf/wDX60AfmMc4Of8A638PU8H1/wA4oAQZyPXLc8dcc9+oP4GgAGc47kLnt0A98nvnj/64ApyScjsR147d+OvvjjmgAyc49we3r9c/Q9c8HrQAN7+/6sMDr7df60AHPzevGenp356EfzPXFABzkdcZPp1+b3z/AJ9aADnAHvknj+96e/8AkmgD/Q0/4Md/+Sb/APBRf/sdf2ZP/Ub+M1AH939ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/ijf8ABaX/AJS0/wDBRn/s8j4+8/8Ac+ap3/zzQB+Y3+Prx1Ht/TqT60AIM+p/i53c+nPH5dcGgBec9+MdT+PoefXnoe+RQAvPX29ePr06/gaADBz1PXPX8x06fj+XWgBDn3/P1P8AnHXjIoAOeRz25z/9bvxnPqT9QA7g5PUjrx3P+fT8KADkfn1J56/5789OtAH+hn/wY7/8k3/4KL/9jr+zJ/6jfxmoA/u/oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP8UX/AILS/wDKWn/gozxn/jMj4+9s/wDM+ap+fNAH5jEdeP8Ax0H+76/j+vpQAgHPT1/g/L3/AK9jQAd+hxx/APxoAO7cHv8AwD1/X/JoACOenf8AuZH59f8APFACkdePX+EH+L/D9OaAE7Hj0/gH4/8A1/060ALjnoOp/g7c9+n4/ie9ABj2/wDHPf8AT8e3PWgD/Q0/4Md/+Sb/APBRf/sdf2ZP/Ub+M3+fegD+7+gAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA
KACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/xRf+C0v/KWn/goz/2eR8fffr481X/P55oA/Mc85P8AMH1HXuenP/6qAGjqOB1bnB75xn2Pbnp34NAC4Oc46Y7ZPAHTk/j+mTQAYPXA5z29wfm75/Dr3oAMHrhfXpz/ACz75657UABHt+nqwPPf/HnPNAAAeRgdeuODj/PPpk0AHcH/AGj2OepOeme/596ADHTgdR7H72eOvGPf1zQB/oaf8GO//JN/+Ci//Y6/syf+o38ZqAP7v6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAMzWda0jw7pOp69r+qadomiaLp97q+saxq99a6ZpWk6VptvJeajqep6jfTQWWn6fY2sUtzeX15PDa2sEbzTyxxqWAB+H/AMUv+DlH/giz8JfE194T139trwp4k1bTbmW0vp/hl4A+LfxT8PRzRMysLXxf4D8C654U1WLKnFxo+s6hbE8CY9wDzP8A4io/+CIf/R2mu/8AiPH7RX/zsqAD/iKj/wCCIf8A0dprv/iPH7RX/wA7KgA/4io/+CIf/R2mu/8AiPH7RX/zsqAD/iKj/wCCIf8A0dprv/iPH7RX/wA7KgA/4io/+CIf/R2mu/8AiPH7RX/zsqAP8xH/AIKa/Gj4d/tF/wDBQn9s747/AAi1yTxN8MPi7+0n8XPiF4B8QTaVquhzaz4U8U+LL/VNE1J9H12007WdNa7sp45WstTsbS+t9xjubeGUFFAPho55+uc8e3v25/TnPUATJz27+mef+Bfn/XrQADIPYZx6c8fX/H+VAC5OfpnnjPb/AGvz6dqADJz/APqzj/vr8KAA5/rzj1Hv6fTmgBMnnp79P1+b+Z/+sALzkfU+nqff8+M9evQgCZP689P73+91/rn6gA/sV/4NaP8AgrL+wp/wTU8E/tm6P+2L8YNQ+F+o/FzxR8DtR8BwWXw3+JPjwazZ+C9F+Jdn4ilkl8BeFvEcWmmyuPEOkxqmpvaPdfaS9os6wzmIA/q//wCIqP8A4Ih/9Haa7/4jx+0V/wDOyoAP+IqP/giH/wBHaa7/AOI8ftFf/OyoAP8AiKj/AOCIf/R2mu/+I8ftFf8AzsqAD/iKj/4Ih/8AR2mu/wDiPH7RX/zsqAD/AIio/wDgiH/0dprv/iPH7RX/AM7KgD9Bf2O/+Ct3/BOn9vbWn8K/srftVfDn4l+N47O4v2+Hdx/b3gb4kS2VnE09/faf4B+IWj+FvFWtWGnwr5t/qGh6ZqdnZIQ91PEvzUAfo0CCAQcg8gjkEHuD3zQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/nWf8HiP/AAU2+KV78bPDP/BNL4aeJdU8LfCrwf4I8KfE39oO20e
9udPm+JXjjxqJtZ8D+CfExt5EbUPBngnwpBpPi2LRJJDpWs+KfE9rqGr2N1deE9BltQD+GIsxxyxxwBvYAAdABngY6AdOBgdgBNxPdugP3m9/f/8AX3oANx9W7j7zYz83+HXH9MgBuOerf99H0z6/565oAAxI6nPPBdux/wA8/hQAbjjOW7fxNzkD3/zz60AJnJHBzzySeuM89Tz7/hkckAU9DkZH69R/nrQA3uOOct3PXH9aAAdeh/h55x2x75/ycc0AKe+fQ/Xtn/J70AKfcdxzn34/Xr7d6AEb3GevTOcZH/66AD+9x2Gffg9P1/zmgA9OO579+efx/rQAcenf1/2hn3680AKCR0yM57kZ5/yc9s8daAAs2T9/1+82D+v8s0AAY8ct1PVjnOM+v88evegA3Hj7/J/vNn69env/AJIABj1yevdm74/x+n55AAbj6t1P8ROeM/59DQB0/gvxv4v+HXi3w3488B+J/EPgzxp4P1iw8Q+FfFvhfV7/AEPxL4a13S7qK807WtB1vTZ7fUdJ1XT7qKK5tL2yuIZ4Zo0bdtBVgD/ZL/4IW/8ABQDxJ/wUk/4Jw/Bf9oL4hC0b4v6TP4g+EfxqudPt4rSx1X4lfDe5ttOvvFEFrbpFaWb+OfD174b8b3unWNvb6fpmpeIb7TtPgjsrWBQAfr/QAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAIeh+hoA/yKv8Ag6jZm/4LdftZbmZtuhfs8KuWJ2r/AMM8fDYhVznaoJJwOMknqSaAP549vuR9D/n6fSgBMZ7noDkHnv8Az6k9zzQAY57+n3h/tc/X09PwNABjJ79v4gf4Tz9ff8aAADp1HB6EevX684z6cUAAX6jp0PsP8/me9ABjkDk8nksM9B+P+HXvQAEcH8uoHofw/wDrn1oAQDnv1P8AED+P1/UdTQAoGSeT0HcHPHf/AOv1oAUjryehPXGMenp6Z7CgAI9z16Z45PT6e34UAIw6+49QO49fyz6cUAGPvcnIxzuGeh/L8aAFwMj6njIx359z+o/CgBMe56+o556n1P8A+qgBcc/Xd3756/Xnr26UANI5br0/vD8sdh9aAFA6devqD2z+OO350AJjgcnr/eHHXke/60AOx15J5A5PXOOv+HfJ9aAEIxnr3zlhz8vf/PHXvQAMMA8k8dz7j/H+negD/UY/4My2Y/8ABKv4nKWYqn7anxYCgkkKD8LfggxCgnABYknHUkk8mgD+tugAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBryJGu6R1Rc43OwVcnoMsQMn60AQ/a7X/n5t/+/wBH/wDFUASpLFKCY5EkAOCUdXAPuVJwaAH0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAIeh+hoA/yKP8Ag6i/5Tdftaf9gP8AZ4/9Z4+GtAH88xOPT8c+o9Pr/nmgBAfp0X9Sfy68UANz349fb+P8f896AFH3u3UdP90/pQAKcAdOh6/X1/z2oAP4Tz6d/ZfrQAmeQeD9768Dvx1/DpQArHgj/wDX1X/H+XvQAgPPbq/8gev9fSgBVBYnAzwp4GfTnGP8P8ACQo5/hPQ/wnq
fw6f55oANkn909R2PT8uv+fegBGSQj7p/75P94Y7Ht1oAjz9/p2/PGD/k/j3oAdnkf7zHrz/F2/rQAdu3Xt/vf5+vWgAz9P4zz9f5etADT95/93/D/PvQAo7dP69Pp+vpjk55AEB4X6j+bD/PfPNACg8dQef6r7cnnr/kACHv/wAC7eigfh/kc9aABiSD9AenqR3/AM59BQB/qL/8GZX/ACis+KH/AGer8V//AFVfwPoA/rdoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/gT/AOD2b9sfyNO/ZL/YJ8OariS/n1f9qP4rafDNtcWloNa+Gnwct7gRNmSC6upfi5qF3ZXWEE+m6BfLFI6QSxAH+fnQB/ow/wDBj/8A8m9/t6/9ll+Dv/qEeLKAP7l6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPQ/Q0Af5FP8AwdQ5/wCH3X7Wn/YD/Z474/5t4+Gv+f1oA/nkP+P8WO/19z/KgBPc56L/ABEdc5PX+f8AWgAznHX6bj/te/PQc5/KgA6nqeo/iPPyk+v455z60AA6dznPfnr9R+J/x5ADPGec8dWxngH6f40AGeRyTkt0bPb1z179eP1oAUnr/wDFf7v+PP8A9egBM8jr1Yfez0H16/j+fWgD+rb/AINMP2QP2Yv2x/2v/wBpXwR+1H8Dvh38dPCnhj9miLxV4d0H4jaFHr+naN4i/wCFr+DNIOsWFvLIghv20u+u7Ezqd5t55I87HdWAP72v+HFf/BIP/pHj+y5/4bi1/wDkqgA/4cV/8Eg/+keP7Ln/AIbi1/8AkqgA/wCHFv8AwSDUq3/DvD9lw4ZeP+Fb2hz8w7Ncsp+jBge4IyCAf41/xY02w0f4ofEfStLtILDTdM8eeMtP0+xtgYrazsrLxPq1paWsCbjshtraGKCJSxKxxquTjNAHAdx16nvxxn39vSgA7Z5646n+9j1oAPU8/wAX8R7HHr/+r1oAQnluvT+9+PH6nvigBR269f72e2eeeen09qAGgjjJP/fZ9+v+e9AC5z+fqT3Xvn3P8vXIAmev/Au5OflHvz685xQAN0P0B+8T1P4/iaAP9Rf/AIMyv+UVnxQ/7PV+K/8A6qv4H0Af1u0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAf4rX/AAWx/bG/4br/AOCnn7Wfx703Vf7W8CP8R734bfCaeKbztPf4V/CaKP4feDdT0xQzLBa+LLPQZPHM8KMR/afijUJiS8rGgD8rKAP9GH/gx/8A+Te/29f+yy/B3/1CPFlAH9y9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFACHofoaAP8ij/g6i/5Tdftaf8AYD/Z4/8AWePhrQB/PKQT39ewPfP+H8z7ACDJ/JT0+v4Z/wA+9ACdx07g8f73Ufn0989qAD+L8s/XaevPXr04980AKBkD6Htnv7/oPxPSgBP4eo7ZyM9h1/8A1Ht70AHpznO7n6j2yOvX1P40AK3Q5/Hj6fn09fY9KAGgc9c8t24zjn+mentnrQB/aT/wZNf8n1ftZf8AZpMX/q6PANAH+lZQAUANbp/wJP8A0NaAP8Fz418/GD4rf9lI8eZ4z/zN2te9AHmOOQeOrdv949f88880AHP
H1x04+99f/r+/NAC//Zds9/f+XfmgBpHzNz2547e/6dMn6UAKOq++T09u3oD+B9aAE+bjn6cDPQ//AF+p9e/UAB+HX0Hcp/jQAdOM/wB78flB5/r1yefegBCCM5Pb+o9+np+PTuAf6jH/AAZlf8orPih/2er8V/8A1VfwPoA/rdoAKACgAoAKACgAoAKACgAoAKACgAoA4j4gfEz4b/Cfw9c+L/in8QfBHw08J2eftnij4geK9B8G+HrXClz9p1rxHf6bpsGEVnPm3K4UFjwCaAPzC+Iv/Bev/gjn8Lrh7XxN/wAFC/2dtSljdo3Pw88S6h8Xody5ztufhNpPjW2deDh0mZD2Y5GQDxSL/g5q/wCCHE121kv7d2gCZSMvL8EP2m4LQ5JHy383wVjsX6clbkgdTgEEgH0p8LP+C3P/AASR+M11ZWPgb/goP+zGL/UXWKw0/wAa/EbTfhZqN3PJxHbQWHxRHg68ku5WISG0WE3MshEccTSEKQD9MfD3iTw74v0ex8ReE9f0XxR4f1OLz9N13w9qtjrWj6hASQJrHU9NnubK7iJBHmQTyISCN2RQBtUAFABQAUAFADSyjqy59yKADev95f8AvoUAKCDyCD9DmgBaACgDkvHHj7wL8MvDOpeNfiT418JfD3wdo0ay6v4t8ceI9H8J+GdKidgiSalr2vXlhpdjG7kIr3V3ErMQoJJxQB+T/wASv+Dgj/gjN8J9UvNH8U/8FA/ghqV5YErcP8OD4x+MmnMylgRa6z8IfCvjnSL4gqQRY31wen94ZAOB0T/g5S/4Ih+IJYYrD9vTwZbvO6Ih1z4WftA+GYlZyFBmn8SfCXSYbdAT88lxJFHGMs7KoJAB+nv7N37X/wCy3+2H4Z1fxh+y38fvhV8evDvh69tNN8R6h8MfGWjeKW8Nalf2z3ljp/iSy0+5l1Dw9f3trHJc2tnrNrZXFxDFLJDG6xSFQD6NJA5JA9ycUAJvX+8v/fQoAN6/3l/76FADqACgBkkkcUbyyukcUaNJJJIwSOONAWd3diFVFUFmZiAoBJOBQB+cvxr/AOCv3/BLz9ni91HSfi3+3l+zF4e1/SJHh1bwtpfxV8N+N/GOlzx/ft9S8HeA7vxN4osbkdre60iKZuqoc0AfIM//AAcy/wDBDu2vVsJP27/DjTsSBJB8Fv2lrqy4OPm1K2+DM2nJ14LXQB5IyATQB9FfCX/guB/wSP8AjbeWmn+Av+Cgn7NQ1G/nS2sdO8c+PLf4Taje3UjbIrWzsPivB4Ku7q6nchILeCGSad2VIUdmUEA/T7Rta0fxFpdjrnh/VtM13RNUt0vNM1jRr+11TS9RtJRmO6sdQspZ7S7t5Byk9vNJG45VjQBp0AFABQAUAFADd6/3l/76FABvX+8v/fQoAcCDyDn3BzQAUAFAHh/xr/ab/Zv/AGbNJh179oj4/fBf4E6Ncqz2mp/GD4n+CvhvZ320suywm8X63pC38rOpjjhszPNLL+6jR5CFIB+YvjL/AIOK/wDgir4EvrnT9b/b9+Fd9PakiWTwb4b+KvxFsnIJH+jan8Pvh/4n028HBw1pdzqRggkEZAKPhj/g48/4Im+LSg0r9vn4c2nmEBf+En8DfGjwUBnp5h8Z/DTQREPUylAOpIoA+8/gH/wUL/YU/alnhsf2d/2vf2dfi/rM8nlp4Y8E/FvwVqvjFXJwon8GLq6eK7YSnPkNc6PEs4BMJkAJAB9i0AFABQAUAFABQAUAFABQB8tfHX9uH9jP9mF2t/2iv2rP2ePgjfhd6aP8T/jD4B8G+ILrKeYFsfD2ua9Z65qMpjzIsNjp9xKyAuEKgmgD86vE/wDwcff8ETfCN1cWeq/t8/Du7mtWKyP4Y8CfGrxtasR1Nvf+DPhnr9jeL6PaXE6t2Y0AUfD3/Byf/wAERfE1xBbab+3r4JtpLlwkbeIfhh8ffCVurN0Nxd+K/hPotrap6yXU0Ma/xMKAP0A+Bn/BRD9g39pm6t9
O+AP7Y37Nfxa1u5dI4vDPgv4y+A9W8X+ZLjykl8Hx64PFEDSk7YhPpMZlYMqbmVgAD7IoAKACgAoAQso6sAfcgUAJvX+8v/fQoAN6/wB5f++hQAoIPQg+uDmgBaACgAoAKACgAJA5Jx7k0AN3r/eX/voUAG9f7y/99CgA3r/eX/voUAOoA/Hb/gut+3t4T/YF/wCCbn7Snj0ePPD/AIa+Nfjr4a6/8MvgB4cn8Qadp/jPX/iF8Qhb+B4PEHg3RZ7qHUtal+Glp4jn+ImryWME8Gn6d4bllvCokijmAP8AGQoAKAP9GD/gyAIH7Pf7euSBn4y/B3qcf8yR4s9aAP7lt6/3l/76FABvX+8v/fQoAN6/3l/76FADgQeQc+4OaACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPQ/Q0Af5FP/B1AM/8ABbv9rTqf+JH+zx0/7N4+GvrQB/PKefX8CPX60AJj69vTt/nnPXtQAY/3vzH+1+vP8vegAxznnt3HoR9e/PvQAYwOM9/TPX8v/rZ70AGOCOe3pnjH+H86ADHs3Ge47/j+Xv19aAAjOc5P/wCsep9v5nuKADHf5s5J7dSP849+tAH9o3/Bk1/yfV+1l/2aTF/6ujwDQB/pWUAFADW6f8CT/wBDWgD/AAXPjVz8YPit1/5KR486df8Akb9a9aAPM8c9+pPbGTn8e9ABjjv1z2z1z/n296ADH179/U5/P0/WgBCOSeeeO39aAADp14+npjnk/pQAY6deMdx6H39+ffHagBMYx169yPY/0/IHvigAxnqG5z6dx9fbAznnrQAEZByDnHXjsc+p59fXtQB/qLf8GZX/ACis+KH/AGer8V//AFVfwPoA/rdoAKACgAoAKACgAoAKACgAoAKAPm/9q39rr9nP9iL4NeIvj7+1D8U/Dvwm+GHhwx282ta5JPPf6zq9xHNJYeGvCXh7TYbzX/F/irU1t520/wAOeHNN1LVrmG3u7sWq2VleXMAB/nqf8FKf+Dw39qL406n4j+HH/BPLwtD+zB8KPPuLC1+MfjDTdF8WftBeK9PBMf2+w029Gs+APhZbX8LyobGys/G3ii0222oab430a7MlpCAfyNfGX48/G79orxld/EP4+fF74lfGjx1fbxceLfij428R+OtfMTuZPssWp+JdR1K6trGNji3sLaSGyto1SG3giijRFAPJ6ACgAoA+nf2aP20/2tP2N/E0fi79lv8AaJ+LfwN1f7ZDfXsPw/8AGmsaPoGvTW5QxxeLPCIuJfCfjGx/dxiTTPFWi6xps4jRZrSRUUAA/tk/4Ja/8HjlzqGr+Hfg/wD8FS/B+mWltf3FlpNh+1f8I/Dz2VtYPK0cJ1D4x/CfTDPELQyNLcah4q+FVtbpZRiC2g+F8sf2rVYgD+8rwH4+8D/FPwZ4Z+Ivw18X+G/H3gHxno9n4g8JeM/B+taf4i8MeJdD1CITWWraJrmlXF1p2pWF1GQ0VzaXEsTcjduVgADraACgAoA/xbv+C7n/ACmG/wCCh/8A2cv44/8AQrSgD8mKAP7+f+DHn4uYk/4KC/Ae9uvvJ8Bfi54bst/TY3xI8G+Nbryyec+Z4Ai3oOMYkPMdAH9/9AH8f/8AwWm/4Oo/hL+xXrXin9mr9hay8I/tCftN6Fd6h4f8efEXWJLnUvgd8FdatWltL7R1bSL2xuPij8QtIuUeC/0XSNUsfCXhfUU+za9rur6xp2s+DYQD/O2/ax/bn/a6/bl8c3PxC/au+P8A8R/jTr0l9c32m2XirXrj/hEPCzXZbzbTwP4C077D4J8C6bhmA03wloGj2RLSSPA0sssjgHyhQAUAf6cf/BlX4C/sT/gm7+0L8QJ4fKuvHn7Y/inSreQrzc6L4J+EHwgjs5g/8SrrPiHxFbheQrQOc5cgAH0B/wAHgn/KHrVP+zl/gf8A+g+MKAP8pygAoA/3FP8AgmZ+0I37Vn/BPj9jT9oS5vv7S1r4m/s6/C7WfF9
2ZPOLeP7LwvYaH8RITNktK1p460vxDaNI+JHMBaREkLIoB+a3/BZv/g4S/Zl/4JQ6bcfDLRbOy/aB/bD1XTYLvR/gZoeupp+leAbLU7QXWl+KfjV4mtYNQfwrp1xbSwalo/hCztp/Gfiq0ms54bbQvD+pR+LbYA/zWf29P+Cyv/BRD/go3rGrn9oz9oPxQfh1qF7Pc6f8CPh1c3XgD4IaJavIz2tgvgXRLtY/FJ05WeGy1z4hX/jLxUsTyJLr8yyOCAfl1QAUAFAH2r+x7/wUX/ba/YI8TweJ/wBk79o74lfCNRqEWpap4Q0vW5NU+Gnie4iZCf8AhL/hhr66p4C8T7408jz9Z8P3d5bxPJ9jubaRvMAB/oZ/8EYf+DqP4N/tv6/4U/Zs/ba0nwr+zl+094hu7LQPA/jfR7i7tPgT8Z9dudkFno1rPrd7fX/wv8eavcsLbS/DniDVtV8N+JL8x2eheKbbXdV0jwhKAf14UAFABQAUAf4N/wC0h/ycR8ev+y0fFL/1ONdoA8XoA/0sv+DKT9oT/hNP2Jv2of2bb+++06n8CP2gNK8f6XBJJ+8sfBvx08Hw29jYwRk/8eqeLvhX431JmUEi61mYSNhohQB/Tp+3Z/wUD/ZX/wCCcXwT1H47ftV/Emy8E+Gla5sfCnhuzSPVviD8TPEkFv8AaIvCPw48IxzwX3iTXZw0RuH8y00TQraYat4o1nQtEiudTgAP84j/AIKPf8HZv7fn7WOq614N/ZRvp/2IvgZIbuytf+EF1C21b49+KLByY477xH8W5bGG58G3MqpHeWun/Cuz8LX+jyzT2F34v8UwpHdsAfy5+LvGPi7x/wCItU8X+O/FXiPxt4s1y5e91rxR4u1zU/EniLWLyU5ku9U1vWbq91LULmQkl57u5mlcnLOTQBzlABQAqsyMrqxVlIZWUkMrA5DKRyCDyCDkHkUAftV/wT+/4OAf+CmP/BPC70zR/h18cNQ+Lfwfs5o/tPwJ/aAm1X4l/D4WYcGS28MXt7qlr42+How08sUPgTxVoOky30xvNW0jVyDE4B/pS/8ABIT/AILh/sqf8Fa/h8kHgu+g+FX7TPhfRor/AOKH7NninWLa48S6XHF5UN74r+HuqNFYp8Rvh2buRIv7d02ytdX0GSeztPGGg6BNqGkPqgB+1FABQAUAFABQB+Ev/BW3/g4B/Yz/AOCU1jf+BdfvJvjr+1Vc6TFqHh/9nLwBqtrb6hpC39v9o0vVPi54ya31LTPhfoV5C0NzbW9zY63421SyurPUdF8Gaho88urWwB/nfft4/wDBxv8A8FSP27NT1vTdQ+O2r/s6/CPUvPtrT4N/s13+r/DPRP7KlLo1n4n8Z2Gov8SPG73tr5UWsW2v+LJfDV5Ksslh4Y0i2uJLMAH4UXFxPdzz3V1PNc3NzNJcXNzcSPNPcTzO0k0880jNJLNLIzSSSSMzu7MzMWJJAIaACgBQSpDKSGBBBBIIIOQQRyCDyD1zQB+y/wCwv/wX1/4Kg/sCajpNr8NP2jfEvxO+GWnG3hl+CH7QV7q/xb+GUumW5BTS9Fg13VU8WeAbQctj4b+KvCBkkYtdfakZ4nAP9D7/AIJBf8HG/wCyH/wVGl0n4SeIoF/Zq/a4ks0z8F/GmvWd74e+I91BC0l/c/BPxzJFpsXi2WKNGvbjwXq2naJ44srb7XLYaX4k0fSNR8RKAf0RUAFAH8e3/B1v/wAEcf8Ahrb4Ev8At9/s/wDhX7X+0h+zX4UnX4r6Bolnu1P4v/s/6R9p1LULsW8CF9S8ZfCAS33iLSmAW91bwPP4o0YvqV9pHg7S4wD/ADEKACgD9Qf+CQf/AAUq8ff8Esf21vh5+0f4c/tPWfh1eOvgX4+/D6xnCr8Qfg7r99Zt4jsLeCWWG2bxN4cntrPxh4JuZpreOHxRoWnWt7cDRb/V7a6AP9nr4WfFDwD8bfhr4D+MHwr8T6Z41+G3xN8JaD458DeLNGlM2m+IPC/
ibTbfVtG1S1Z1SVFurK6id7e4jhurWXzLa7hhuYpYkAO9oAKACgAoA/zK/wDg7O/4K+/8NPfHdf8Agnl8CPFH2r4C/s1eKZbr4161o15nT/id+0LpiXOn3Xh95oHZb7wz8F0nvtBWFmS3vPiJeeKJ7m1u08LeFtUAB/G3QAUAf1Pf8G5H/BO74L+IPEniv/grJ/wUC1/wr8MP2D/2LfEGnXvh3W/iZItj4S+J/wAfbW801vDdr9nmimm8R6D8O9U1HQ9QbRNOtry68ZfEfU/BPgzS7HxCF8VaGgB+pv8AwU0/4PKr+7/4SD4V/wDBLv4eHTYD9q06T9qb41+H4ptQkHzxDU/hZ8GtQEtpaDcEutM8QfFb7bJLFJJban8LLOZUuAAfw7fHf9oT44/tP/EjW/i9+0P8V/Hfxl+JfiF86p4y+IPiPUfEesPbpJJJb6bZS380sWk6JYebJFpWg6TFY6LpFsRaaZYWlqiQqAeO0AFABQAUAFABQB/rO/8ABpR/yha+Df8A2V39oD/1ZWq0Af0t0AFABQAUAFABQAUAFABQBy3jbxv4O+G3hHxH4++IPinw/wCCfBHhDRtQ8ReK/F3ivV7DQPDXhvQdKt3u9T1rXdb1Se107S9L0+1jkuLu+vbiKCCNSXfJAIB/Lb8ev+Dw7/glf8JvF2peFPh9oP7SX7RMGl3txZy+Nfhf8PfDOheBb5raZreWTRdQ+KXjfwR4h1W38xJDDfJ4Wt9PvIfLuLK6uIJUlIB4N/xGwf8ABPr/AKNc/bO/8FHwO/8Anu0AH/EbB/wT6/6Nc/bO/wDBR8Dv/nu0AH/EbB/wT6/6Nc/bO/8ABR8Dv/nu0AH/ABGwf8E+v+jXP2zv/BR8Dv8A57tAAf8Ag9g/4J9EH/jFz9s7/wAFPwO/+e6f60AfxB/8FlP25/hx/wAFHv8AgoT8bP2vPhP4S8ceCPAvxM034W2mkeG/iNFoMPi2xk8C/C7wl4F1J9Ri8M6vrujLHeanoF1d2C22q3TfYJoPtBjuPMjUA/L0j/d79R7/AP1+ff60AJ+XRevTvzjj8KAEHUdD/L+P9P59aAFHJ6dx29VPH/1qABeg6Hg9f971waAAdO3br/ujuQf5f40AHcdOrdBgHj8fx/KgAbgHp/nb/k/hQAg6jgdXH6d/zoA/tI/4Mmv+T6v2sv8As0mL/wBXR4BoA/0rKACgBrdP+BJ/6GtAH+C58a/+SwfFbp/yUjx4ef8Asb9aoA8y7j/ebnv/ABf560AHYcDr19Pm6f5//WAJ6/R/5nv/AJ60AIRy/Tt1Hr6e/wDM0AL3HTv268Dof5njPpQAmPu/d5x29j155/x/UAUfUde31Xvjr/X6cACevT+Lt/sj8vf3/OgAPQ9Og6Y9Rzn0OeM/p3AP7DP+CCv/AAcVfsrf8Eov2M/GH7N3xp+Cv7QvxC8XeIf2gPGnxbttd+Fll8N7rw5DofiXwZ8PPDlpps7eLfHXhnU11a3u/B9/PdrHYy2Zt7mzMN0ZRcRqAftp/wARsH/BPv8A6Nc/bO/8FHwP/wDnu0AH/EbB/wAE+v8Ao1z9s7/wUfA7/wCe7QAf8RsP/BPr/o139s7/AMFHwO/+e7QAf8RsH/BPr/o1z9s7/wAFHwO/+e7QA5f+D1//AIJ8l1D/ALL37Z6qWUMw0f4HsVUnDMF/4W8NxUZIXI3EYyM5oA/YD/gnZ/wcBf8ABOP/AIKV+LLf4X/Bv4i+Jfh58bL61lu9J+Cvx08PWngPxx4lhtYZbi+/4Qu9sNa8R+CvG15ZQQXN7caJ4b8VXviWHTLe41OXQksLe5uIQD9ss55ByDyD1znvQAUAFABQB8h/t1fttfA3/gnp+zJ8Rv2p/wBoPXX0vwP4CsFTT9F09raTxR4/8ZaissXhb4eeCdPup7dNU8V+Kb+M29nC80Nlp1lFqPiDW7vTvD2javqdkAf49f8AwVA/4Kk/tK/8FVP2hNW+NHx01250vwjpd1qNj8HPglp
GqXlx8P8A4N+D7iZPK0jQbSUW8Wp+JNSgt7Ofxn44u7KDWfFuqQxySx6dolhoPh/RQD816ACgC9pumalrN7BpukaffarqN0xS10/TbS4vr25cKXKQWtrHLPMwRWYrHGxCqWPAJoA9avP2b/2iNP0lNfv/AIC/Gix0KUMYtbvPhb44ttJkCgMxTUptCSzcKCCxWY4BBPWgDxuaGW3llguIpIJ4JHhmhmRo5YZY2KSRSxuA8ckbhkdHAZWBVgCCKAI6ACgD+lX/AIN7f+C7Hj3/AIJk/Grw/wDA342+KtW1/wDYN+KniZbTxxoF8brV2+BXiPXZlgT4weArdfOvLHTLe9eGb4l+F9LSWDxDoJvtcsNLvPF2m6ct8Af6yGia3o/iXRtI8R+HdW03XvD+v6ZYa3oWuaNfW2p6RrOj6raxX2matpWpWcs1nqGm6jZTwXljfWk0ttd2s0U8EskUiuQDToAKAP8AFu/4Luf8phv+Ch//AGcv44/9CtKAPyYoA/q1/wCDOj4uf8K//wCCteoeAZ7rZa/Hb9mH4ueA7ezd8R3Gt+F9V8FfFuzuETI33Vpo3w88RRxnnbbXl7xzuUA/dP8A4Okv+C82ufs42esf8E4f2N/G93oXx08S6NbP+0z8WfDF61tq/wAJfBniTS473TvhZ4Q1W3YXOmfEbxrol/a6v4l8QWUkF54M8H32nWmj3B8R+Jpb7wiAf5vBJJJJJJJJJOSSeSSTyST1NACUAFABQB/rc/8ABqB4C/4Q3/gif+zxrbQ+TN8TviD+0D49mBXa8nkfGTxd8PreZwcH97ZeArV4mP37cwupKMpIB5T/AMHgn/KHrVP+zl/gf/6D4woA/wApygAoA/tS/wCCeP8AwcSeDv8Agn5/wQHu/gX4N16y1z9ubwR8YPix8Kv2ffAOpadc6jbeEvA3xDvI/ilF8dvEjXdpJod34X8HeIPHPjbTtD8O3FzdXeveMtL0jSbzRx4Vk1XUrQA/jd+IPxB8cfFfxz4t+JnxL8V6946+IPjzxBqnivxl4x8Ualc6x4h8S+I9bu5b/VdY1fU7ySW5vL6+u5pJppZXJy21QqKqgA4+gAoAKACgAoAUEqQykhgQQQSCCDkEEcgg8g9c0Af6V/8Awauf8FxvE/7WHh4f8E7v2sPFs3iH4+fDDwhcaz8Afih4i1OS58Q/GP4ZeG4l/tjwP4nvL52uNb+I3w20vytR0/V/PudU8XeALbUL7V4f7T8Ea1r3iIA/tIoAKACgD/Bv/aQ/5OI+PX/ZaPil/wCpxrtAHi9AH9IH/BtF/wAFRPgp/wAEwf2qf2hfGv7SnifVvD3wW+JP7Mfimxe20PQ9X8Q6rr3xW8BeItA8W/DjQtN07SbW6X+1fEGmjx14V0e61aTS9CtdV8R2U2ta7o+mrdXagH5kf8FMf+Ckf7QH/BUL9p3xX+0R8ctauodOa61DSPhL8L7W/muPCPwc+G/2+WfRvBfhq3KW8Et0sHk3PinxK1pb6l4u1/7TrGoLEjWdjYgH57UAFABQAUAFABQB7R+zt+0H8W/2U/jf8NP2ifgV4uvvA/xX+Evimx8W+DvEdid3kX1mXiudP1K0YiDVtA13TZ73QvEmhXqy6dr2galqWjalDPY31xC4B/s7f8Epv+CiHgT/AIKhfsUfC79q7wdpsXhnW9b+3+D/AIseAo7sXp+Hvxe8Ji2t/GXhiO53ySXGkz/a9O8UeE7q6KX974L8SeHL7Urez1G5urK3AP0aoAKACgD+U7/g48/4L7wf8E3vBsn7KH7Lmrabqn7bPxP8L/b9S8Sr9j1PT/2afAutxNFY+LtUsJhcW178T/Ets0tx8PPDOoQyWuk2Kp478TWsulv4Y0fxgAf5bPizxb4p8e+J/EHjbxx4k13xj4y8WaxqHiHxR4r8Uatf694j8R69q11Je6prWua1qk91qOq6rqN5NLdX1/fXM91dXEsk00ryOzEA56gAoA7bwh8NPiP8QZTD4C+
H/jbxvMJfIMXhDwprviWUT4VvJMejWF64l2ujeWRvw6nGGBIBe8ZfCH4s/DkOfiF8L/iJ4ECOsbnxl4K8S+GAkjsERHOt6ZY7XdiFVThmYgAEmgDzugAoA0dI1fVvD+raXr2g6pqOia7omo2Wr6LrWkXtzpuraRq2m3MV7p2qaXqNnLDeWGo2F5DDd2V7aTRXNrcxRTwSxyxq4AP9R3/g2f8A+C7d/wD8FDPh7d/sjftW+LLO5/bM+EOg/wBo+GfFuoG3sbv9ov4WaXFBBN4ikVPKt7v4peCCY4fHdtbRQz+ItDmsPG9rBeTw+NptJAP6yqAGuiSI8ciLJHIrJIjqHR0cFWR1YFWVlJDKwIIJBBBoA/yeP+DmT/gjo/8AwTi/am/4Xv8ABTww1j+x3+1Dr+r6z4LtdMtWXSPg98VpRPrPjD4PSeSn2bTdDu1N14t+F8DC1R/C7ax4X0+C4HgC/v7kA/mQoAKAP7y/+DQj/gr1/wAI3rs//BK34+eKNmgeJ7vXPGH7IGv61ebYNI8Tzm613x78DluJ38uK08Ut/aPj3wFbt5Ea+JU8aaMs95qPirw3psQB/oW0AFABQB/PF/wcbf8ABXS2/wCCYn7G954Z+F/iCC2/a7/aWs9d8DfBCC1njbVPh9oaW0dt45+N1zBkm3Hgyy1C307wY04K3vxA1fQ7lbTVNJ0HxLDbgH+RXc3Nze3NxeXlxPd3l3PLc3V1cyyT3NzczyNLPcXE8rPLNPNK7SSyyO0kkjM7szMSQCCgD70/4JqfsAfFr/gph+198Mf2VfhNFNYv4ovTrfxG8cvZSXulfC74U6HcWr+NviDrKK0UTppVncw2GhafPc2a+IfF2q+HfDEN3b3WtQSqAf6MH/Bwx+zf8Jf2Q/8Ag3G+J/7NfwL8NQ+FPhX8H3/Zj8I+FNKQxyXc0Fr+0J8OZ9R1zW7yOKE6p4m8Tavcah4j8Ua1LGtxrXiHVdT1W5Hn3clAH+VXQAUAFABQAUAFABQAUAf6z3/BpT/yha+DP/ZXP2gP/Vl6tQB/S1QAUAFABQAUAFABQAUAFAH8BX/B6j+3B4/0HVP2bf8Agn34S1u70PwN4u8EP+0p8Y7GyuJbf/hOUXxlrngr4S6Dq5hmj+16B4d1zwZ438UT6PepPp994hi8L6s8JvfDdjJGAfwAMSx3HBJ5ZiWJJJPJOcknuTznjrgUAAx/s9+57j6n88/rQAgx146N3Oe/bP8AnmgAHX+HuOp649ckd/8A9RIyAHGO3Udzjoeeuf8APqDgAT/vn8z1/wC+v8ffGeAAP/AT+J/qf89enNADjg/3e/Un1J7Hv+f9ABDjnp0Xuefrz/8Aq7+tABxkdOh5yf8Aa9+nr/8AXoAXPP8AXPP3f979e+etAAMEduh6k46/Xj19/wA6ADII/LjJ9B2zn9D0oAOOOn8Xc4zj6+/P5jqMgCnHPQ/ie+3qc/rn+tADR1/h/i7nHTHr39+1AH9pH/Bkz/yfT+1l/wBmkRf+ro8A+tAH+lbQAUANbp/wJP8A0NaAP8Fv41/8lh+KvT/kpHjzgk8/8VfrXuP6/wBaAPMuNw6dT3Oc5Pv3/XPvQAnp06joT/ePbOTz7HvQAvHPTo3Un+99e/fue1ADT1bgdfU89ffr+H1xQA4fw9Ordz+nJx7g4PtQA38ufc+h6/N+H8+oBAFH4df9r1X1/wA5x2zQAfl/F3Pp9eff8x60AIcYPQ8DuT3Hv0/Ud+vIAdyeDwe/PT/eOf8APPoAKPvHp198/eHvj+ue2OoAccnj8zn7w7k9ffuehPNACDtwPfk9j67v54/GgBR34H6+jdyf1zjrzQAh7fd6epHc+pH60AdB4V8T+IvBXiTw/wCL/CGu6r4X8VeFtb0jxJ4Z8SaFf3el634d8R6Hfw6lomv6LqljNBe6bq+jalbW+o6bqFpNDd2l1bxSwyoy0Af7Xn/BI/8Aa5179uz/AIJvfskftUeLhbf8Jt8T/hiIfH89lBF
a2d/8RPAPiPXvhn8QNWtLOEmKytdY8Z+Ddc1W3sYzssobxLVeIqAP0ZoAKACgD/Jd/wCDm7/gq3qX/BQf9t7W/gz8OPEU8/7LH7Iuu+Ivhx8P7SwvWfRfiF8TLG7fSfiZ8XZ44JHtNSivNVsZPCPgS98y7t08FaLFrmlNYy+NNdgmAP5qaACgD+0L/ghN/wAGtt9+2D4L8Jftff8ABQaTxX4C/Z38W6fp/iP4PfA7w1qTeG/iF8ZdAuzHd2PjPxtriQy6l4D+GOuWWG8P2Ojmx8deM9Ou18Q2GreENCGh6n4qAP8AQe/Zw/Yz/ZP/AGQfDdt4T/Zi/Z2+EHwO0i3sINNmk+HngbQdC1zWLe3RESXxP4qgsz4o8XahJ5aPdav4o1jV9VvZVE15ezzfOQD6YoA+B/2xv+CX37BX7e3hvW9C/af/AGZfhf4+1fWbVreL4lW3hyw8M/GLQpVTFteeHviz4ch03x3pkltKsU5sRrc2i6gYIrbWdL1Ow8y0kAP8zP8A4Ltf8G/vxR/4JNeJrb4ufDTWNb+MH7FHjrxFFoHhL4h6tHZt46+GPibUILm7sPAHxct9Ks7DTpZ76CzvP+EZ8eaPp9hoPiQ2stlfaZ4b1trLTNRAP5y6ACgD/Sb/AODOf/gpXrHxs+BPxL/4J6fFrxbda346/ZttLb4gfAWTWr03Op3n7P8ArV7a6Nr3gyyllZ7mfTvhN42u9NOnC4kkay0D4jaLoGmLDo3hm1trUA/tdoAKAP8AFu/4Luf8phv+Ch//AGcv44/9CtKAPyYoA+rf2IP2xfiv+wF+1J8Kv2ufghaeFL/4n/CC78T3Xhmw8c6fq2q+Er0eL/BHiXwBrVrrmnaHrnhvVLy0m8P+K9VRYrXW7E/aDC8rywrJBKAeFfEz4k+OvjJ8RPHPxZ+J3ibU/GfxF+JXizX/ABz448WazMJ9U8ReKvE+p3Osa5q99Iqonn32o3dxO0cUccEIcRW8UUKRxqAZHhPwj4r8e+JdE8GeBvDHiHxp4w8Tajb6P4c8KeE9F1LxH4l8QateOI7TS9E0LR7a81TVdRupCI7eysLW4uZ3IWKJmOKAP6n/ANjP/g0B/wCCkv7Reh+H/G3x88RfDH9jbwdrkMN2dD+IT6r47+Ndrp9zh4Lqf4X+Elt9D0yd7f8AezaJ4t+JPhXxHYSslpqej2V0LiO2AP238G/8GQv7L1jHEPiD+3J8e/E8oQCd/Bvw3+HngWOSTHzNFHrd78RWhQtkhHlnZR8pkY/NQBl+P/8AgyB/Z51DTrlPhb+3h8Z/CerHJtLnx/8ACTwR8QtOTAOEubLw74j+GNzJuOAZYr+LYMt5Mh+UgH9XP/BOD9kFv2B/2If2d/2QZPFln47ufgf4Mu/Dd/4y0/R5vD9l4m1PUvEuu+J9U1i30S4v9Um0xL3Uddupvskuo3rxMxBuZvvkA/Eb/g8E/wCUPWqf9nL/AAP/APQfGFAH+U5QAUAFAH6U/wDBMf8A4JV/tU/8FWfjbN8I/wBnPQbDT9B8M21rq3xU+MfjP+0rH4ZfCvQrt5ks5vEOq6fYahdX3iHXZLa5tvCfg/R7W71zxBc215dCKx8P6R4g1/RgD/Q5/Yz/AODSD/gl3+zxoHh2+/aA0Lxj+2T8VLGGG51rxD8RvEWveCfhs+srgyP4e+FHgLXNMtk0UAbE0jx54k+IfmlpZbi5fdBDbAH7J+Gf+CTn/BLvwfAYPD3/AATq/Yist0TQPcTfsu/BbUtQlhdSjxT6nqvgy91GeN1JDpNdOr5O4HJyAfP/AMdf+CBP/BH39oHw7qXh/wAUfsF/AbwQ9+Gkh8QfA3wrD8BPEml3m1hFe2Go/CJvCEbtA7eaLDUrXUdGunVV1DTLyHMRAP4kv+C0H/Bqn8T/ANiXwf4z/af/AGHvE3if4/fs0+DdKufEnxE+Hfi77BdfHb4SaBZJ5ureJIJ9B0rSdH+KHgTRoVm1DWdR0rR9C8VeFNHU3ep6Fr+
j6Xrvi20AP496ACgD174BfHT4m/syfGr4X/tBfBrxHceE/ij8IPGmh+O/BWuwbnW11nQrxLqO3v7YOialouqQrNpWvaPclrLWtEvdQ0m/jlsr2eJwD/bZ/YB/a98Jft6/sa/s9ftc+DLWHTNM+Nfw803xHqmgQ3Yv08KeNLCe68O/ELwb9u2xtef8Ih470bxH4bF48UD3i6Wt20EBn8pQD7BoAKAP8G/9pD/k4j49f9lo+KX/AKnGu0AeL0AFAEkUUs8scMEck000iRQxRI0ksssjBI4440Bd5HchURQWZiAASaAP7XP+CUf/AAaDfFL4/wDhrwh8d/8Agov428R/s/8Aw38TaXZ+INB/Z78CRWsHx81fTL+FbrTpfiHrfiLStR0D4SrcQPb3MvhgaJ4s8Z/ZribTtch8Ba5bSRIAf17/AAK/4N5/+COPwA0WDSfDv7C/wi8f3SiJr3xB8dbXU/jrrep3MS7TdT/8LR1DxNo+nmXrLZeH9I0bSi3zLp6HOQD6wuf+CWP/AATHvNKTQ7n/AIJ1fsLy6REGEOnH9kv4Cra25YAF7aJPAKi3l4B86Dy5QQGD7gDQB+Vv7Zv/AAav/wDBJ/8Aal8Oa9J8N/hJc/shfFS/jkm0f4ifAPUdTsfDtjfqHa2i1b4N61qd58M73QfObdf6f4a0fwXrd1APItPE+mERyxgH+bZ/wVA/4JeftIf8EqP2ibz4F/HvT7bVtD1uG/174P8Axe8P286eCPjB4ItbtLb+3tD895p9I1zS5J7Wz8Y+DdRmk1bwtqk8CtNqeh6l4f8AEOuAH5u0AFAH9p//AAZeftl6/wCAP2vPjh+xHrWpb/h9+0H8Nb34r+ENPuLhj/Z3xf8AhG9kt6NJtiRHGfFfw01bxDceIZl3Tzj4feGUCGK2kdAD/SpoAKAPgX/gp1+3l4G/4JsfsUfGr9rXxrb22sXfgXQ49M+HXg24ufsz/ED4r+J5ho3w+8HIyOt0LK+1y4iv/El1ZLNd6R4P0zxFr0dvOulPGwB/iqfG/wCNPxL/AGjfi98R/jt8ZPFOoeNfij8V/F+teN/G/ibU5C9xqeu65dyXdz5MWTFY6bZq8en6PpNosWnaNpFrY6TptvbafZW1vGAeWUAe4fs3/s4/Gb9rf43fDz9nX9n3wRqXxD+LnxR11NA8I+F9NaCA3E4gmvdQ1HUtQvJYNP0XQNC0q1vdb8Q69qlza6Xomi2F9qmo3MFpayyKAf6dH/BLL/g1a/Yf/Y38LeFPiB+1v4W8M/tj/tOPp8F54h/4T3Tk179nvwPq86+ZcaR4D+F2s2MemeL4dNLrZ/8ACWfE/T9cvNSuLVdb0Xw74Ie4OlwAH9QPhfwp4W8D6Dp3hbwX4a0Dwh4Y0eAWuk+HPC+jadoGg6XbKSVt9O0jSba00+ygUkkQ21vFGCThaAL+raRpWv6ZfaLrumafrWj6nbS2epaTq1lbajpmo2c6lJrS+sLyOa1u7aZCVlguIpIpFJDqQaAP5x/+CnP/AAbG/wDBPr9uzwr4j8UfBr4f+GP2Pf2lV0y9m8MfEH4NeHdP8L/DbxHrqpJNZ2vxV+E2h2tp4X1fTr67eT+1PE3hWw8OePBNcLqN3rWvwWS6HeAH+XN+19+yL8d/2F/2hPiD+zJ+0f4Qfwb8UvhzqENtqVtDcDUND13SdQt47/QPFvhPWo0jg13wp4m0qe31PRtTiSGUwzPZ6laadrFnqOm2YB80UAes/An44/FD9mn4x/Df4+fBbxZqPgj4p/CfxZpXjPwV4n0yRkn0/WNJnEixXMWRFqGk6lbtcaVruj3iy6drmiX2oaPqdvc6ffXVvKAf7c37Bv7XXgb9vH9j/wCAP7Wnw+MEOh/Gj4faV4j1DR4LgXTeE/GVq02i/EDwRczgky3ngrxxpniDwvczHi5l0o3Ue6GaN2APrmgD5G/bq/Yv+D3/AAUE/ZZ+LH7KXxw037T4N+JugSWllrltbwTa74D8YWDfbfB
/xC8LST4W38ReENditNVs1Z1tdSgiu9D1VLnRdV1OyuQD/Fl/bT/ZB+MX7B/7TfxZ/ZW+Omj/ANl+P/hT4kn0mW9t45xovi3w/col/wCFfHfhe4nSOS88L+M/D9zp/iDRZ5ES5itb4WWowWeqWl9ZWwB8tUAdJ4O8YeKfh74u8L+PfA+v6r4V8aeCfEWi+LfCPifQ7yXT9a8OeJvDuo22r6FrukX8DLPZ6npOqWdrf2N1EyyQXMEUqEMoNAH+yZ/wRB/4Kl+Fv+CrH7EvhH4v3Nzpen/H34efYfhx+0x4KsPJtv7F+Jen6ekieLdL0xSJLXwb8TNOjHizwwVR7Ownl13wjFe31/4R1SagD9iKAPIfj98dvhf+zF8Ffid+0F8afE1r4P8AhZ8IvB2s+OPGviC7w32TR9Gtmma3sbbcsupa1qtybfSdA0a133+ua5fafpGnRT319bwuAf4tX/BT/wD4KD/FD/gpv+2R8Uf2qfiS11ptj4hvR4d+FfgSW7N3ZfDD4Q6Bc3aeCPA1iyn7O9zaWtzcaz4nv7WO3g1zxprXiTxAttbf2r9niAPz6oAu6bpuo6zqNhpGkWF7qurare2um6Xpem2s99qOpajfTpa2VhYWVqktzeXt5cyxW9ra28Uk9xPIkUSPI6qQD/Xg/wCDd7/gkFp3/BLb9kG11j4laJZH9r79omy0Txn8edUdILm88DaclvJdeD/ghpl7HvRLLwNbX89x4sls5ZYNZ8fajrkq32p6JpXhdrMA/av47/AD4LftPfDPXPgz+0F8NPCfxd+FfiW40e71/wAB+NtMj1fw5q1z4f1ey17RZ72wlISaTTNZ06x1K0Yn91dWsMo5WgD4B/4cXf8ABH7/AKR2/sv/APhu7H/45QB/JR/wdu/8E9v2JP2Nf2Zv2T/Ff7LH7Mfwk+BHiPxj8dvFXh/xRrPw68MW+g32u6JafD+71K20zUZoXYz2kF+i3UcTcLMocc0AfweUAFAH90P/AAaM/wDBP/8AYr/bN+CP7Zev/tU/s0/Cf48az4H+Knwv0jwjqXxG8NQa9deHtM1bwj4jvdSsdMkmdTb297d21vPcIv8ArJIUY/doA/r5/wCHF3/BH7/pHb+y/wD+G7sf/jlAB/w4u/4I/f8ASO39l/8A8N3Y/wDxygA/4cXf8Efv+kdv7L//AIbux/8AjlAH3v8AAP8AZ2+B37LXw3074P8A7O3wu8IfB74YaRqGrarpngfwNpcej+HrHUddvZNR1i8t7GIsiT6jfSyXV04OZJnZzyaAPZ6ACgAoAKACgAoAKACgAoA/zB/+D1T/AJSm/ALp/wAmBfCzr/2cV+1T+JPPHp+JyAfyCcZ/hx9e2T/npzk80ALxnt+fbH+c+q9aAEHTtk54z9ehz/X1oAPb5e3fngfr/h160ABx/s9RyTx0P6/n6/QAXjdnjp1zz1+v05+nNACHHbb09cdwfUe5/wD18gDuO+PxOf5/Tn396AEGOvHQZ54zyfX6YoATjPbHse3ze/5/U0AL37dR35+6ff8A/WM/WgBBjAzjv3z3+p4/r70AHY9O3U5HQe5/D8KADuOB3yc/r7+/Hr26gCnoemfcn1HU5z/kUAJxn+HGW7/T3/P2+poA/tH/AODJr/k+r9rL/s0mL/1dHgGgD/SsoAKAGt0/4En/AKGtAH+C58av+Sw/Fb/spHjzr0/5G/WutAHmWBntnJ789/fP1H14oATtjjnHQ+/bn8RjuDQAvr/wI8n3+vT+9njNACcfN07Dk9Tg9ef8KAF646fn/wCg8/nQAccdOMZ56cH3/wAe596AE/7559Pw/L69c7aAA9+nOc889Aex9fvf5NAA3I7dO5xnnjoenXqetACnv05z36/L9fz9qAD8s89+fvfX6/8AAsUAHvx1/HtnPPXr+OKADuDx/F39T256mgBO5Py/UH6nnnr6+2aAFBA7r7c+/wD9YfjmgBD347evHf3xj8O5z1NAH+vt/wAGuf8Aygp
/Ya+v7TX/AK2H+0F/n19eaAP39oAKAPx0/wCC9f7b91+wJ/wS6/aU+MfhrW5NC+KfjDQIfgd8Fb21nNtqlr8Tfi2Lnw7Z67os+QI9Y8C+Fv8AhK/iRYltymTwaVMcu7y3AP8AGTJJJJJJJJJJySTySSeSSepoASgD96/+Dc//AIJp6Z/wUm/4KJ+DfD/xJ8PRa/8As5/s/wCmj43fHjT7+Nm0rxPpei39vZeCPhreYKrcRePvGk+nW+taczo994G0jxoYZEmgQ0Af7A1ra21jbW9lZW8FnZ2cENraWlrDHb21rbW8axQW9vBEqRQwQRIscMMarHHGqoihQBQBPQAUAFAHhP7Tn7OXws/a5+AHxZ/Zr+Nfh628TfDL4xeDNW8G+J9OuIo5JreO/iEmm67pMsiubDxH4Y1iHT/EnhnVodt1o/iHStN1S0kjurSKRQD/AA8v2nv2fvG/7KX7RXxt/Zr+I8ca+N/gb8TvGPwy8QzwIyWepXfhPW7zSo9b03ezs+ka/a29vrekTF28/TNQtJgzCQEgHhVAH6e/8EZP2qdW/Y1/4Kefsa/G201yfQfDsPxq8I+APiXOs2y0uPhR8VNSh+HnxETUoHP2e8trDw14jvddtobkbIdW0jTdQgkt72ytbqAA/wBrugAoA/xbv+C7n/KYb/gof/2cv44/9CtKAPyYoAKAPVfgd8Evij+0j8Xvh38CPgr4Q1Tx78Vfir4p0zwd4I8J6PGHu9V1rVZvLj82aRkttP02xgWfUta1nUJrbS9D0azv9Y1a7tNNsbu6iAP9c3/gi5/wQt/Z1/4JL/DOz14WulfFb9r7xn4ftrf4s/H3UdPR5NM+1xQz6j8PPhFb3kX2jwj8O7K7HlXN0gh8R+PLm2h1jxXOlrBoPhrwyAfutQAUAFABQB/LV/weCf8AKHrVP+zl/gf/AOg+MKAP8pygAoA63wB4G8UfE/x34K+GngjSp9d8afEPxb4c8DeENEthm51jxR4t1mz0DQNKtwes+oatqFpaRDvJMtAH+2F/wS6/4J9fDf8A4JmfsY/CX9lnwDFpt/rPh3SU134teO7OzW2uvib8YNfhguvHXjS8keNL2Wzm1FRo/hS11B5rrRPBWkeHNAaaUaYJHAP0JoAKACgBkscc0ckM0aSxSo8csUqrJHLHIpV45EcFXR1JV1YFWUkEEE0Af5MP/Bzx/wAErtH/AOCdv7cMXxH+DfhK28MfsuftZ2mr/ED4caNo1r9n8PfD34iaRNZRfFj4Y6dbxgQabpVpqWqaX418KadDHa6dp/h3xjD4Z0WA2vhO48oA/mooAKAP9FP/AIMpP2y7rxR8If2of2EPE1/LcXXwq8RaZ+0J8K453aVo/B3j9rbwn8SdFtgSFtNN0DxjpXhTXoIQGNxqfxE1qcsu3DAH90tABQB/g3/tIf8AJxHx6/7LR8Uv/U412gDxegAoA/tW/wCDQb/glNoX7QXxf8Yf8FGPjf4X0zxD8Mv2dPEh8CfAXw/rtlb6hp+vftAjTNL13VvHc+n3sc1vNB8IvDetaNc+HJpraRP+E68VaXr2lXdrrHgFqAP9KKgAoAKACgD8Bv8Ag5S/YQ0D9tv/AIJafHTU7Tw9Z6h8Yv2XNA1X9pD4Q64Yl/tbT1+H9kdX+Kfhy0nQpc3Np4y+F1j4lsRoiym21HxRY+EdQltrm90TThGAf5AFABQB+sf/AAQp+Ler/BX/AILA/wDBPLxfos/kXOtftNeAPhNdN5jRq+jfHi7l+COvRORncsmjfEK+AVvlL7dxX7wAP9pWgAoA/wA3j/g9G/bgu/Hv7SXwI/YJ8Ka3K3hL4CeEY/jP8VtMtbgi0vPi58T7SW18Fafq9sSd2oeCvhfF/bOkzqEAsvi7qCMZWIEIB/EdQAUAf6cP/BoX/wAEydD+AP7JN/8At/fEnwtZt8b/ANqz7fYfC7UdRtlk1XwT+znompfYrJNOMoL6bc/FXxVpV74p1SWHP9p+EtI+Hss
ckaPeRSgH9i9ABQAUAFAH8gP/AAeAf8E8dC+Pv7EOkftw+DPDVkfjN+yHq2k23jHW7S3VNY8R/s8eNdZXRNc0W8eLZNqaeBfHOt6B4z0j7U00Xh/RLz4iXFrHEdYvXYA/zBaACgD/AEOv+DJT9qnVtd+HX7Y37GXiLXJ7my8Aa94H+P3wv0e5m882eneN49S8F/FSOwEhMtpplprHh74c3/2OEmz/ALU8S6pfLFBd313LeAH931ABQB/LR/wc+/8ABHIf8FBf2ZP+GnfgZ4WF/wDte/st+GtU1HT9P0mz83WfjP8ABS0e61vxX8MhHbobrVPE/hmWTUPGfwztkF1cXGpy+JvCWn2ct943gubEA/ynyCCQQQQcEHqD3B96AEoA/Yf/AIIhf8FSvFP/AASn/bb8IfGC4udV1H4CfEL7D8OP2l/BVh5tz/bfw01DUI3TxZpemKTHdeMvhnqLjxb4XZUjvL+GHXPCMd7Y2Hi7VJiAf7Jvg3xh4W+IfhHwt4+8Da/pfivwV438O6L4u8I+KNDvItQ0XxH4Z8R6bbaxoWu6RfQM0N5puraXeWt/Y3UTNHPbTxSoSrCgD/OP/wCDuP8A4K+/8Lw+K6/8EzfgL4o8/wCEvwM8RW+s/tMa5o15m08d/HDSyzaX8NXuLZzHe+Hvg8JGn8Q2kkslvc/FC5ls76xt9T+G2n3c4B/E3QAUAf2+f8Gkn/BHD/hcfxCt/wDgp9+0P4V874W/CTxBd6Z+yr4b1qz3Wnjr4vaJO9rrPxca2uk8q88OfCe7WXTPCNykc0N38UFutStruy1H4bNDfAH+j5QAUAFAH8SP/B7r/wAmjfsVf9nHeMv/AFWV7QB/m60AFAH+jF/wY/8A/JvX7en/AGWb4Pf+oR4qoA/uWoAKACgAoAKACgAoAKACgAoAKACgAoA/zB/+D1T/AJSm/AL2/YB+Fh/82K/ap9jmgD+QTAzk9c+/qTnpz6jtjPoTQAuee/Xvn0/zx2+8eKAE7Y+vfjHfJxz169uvO00AA6jHtnk56Y5GB/nnpmgAPJz+PJIz77v6fj35AF7556e/rnpj+vNACNg++M9+/Hsfp9eOpoAcD/nBH8/8/wBQBB/T3J4z/nP8XagBO+e49fx4/HPGM/jj5gA753L2/QY6Z9+n457UAA47r+fv6/8A1vzzkABxjqp/H6d/w9OevfgAOMg5XjPf2x7k5/T3oACcjG5fz/z/AJHvwAHGc5HUnr6/z/THvQB/aN/wZNMv/Ddf7WQ3KSf2SIjgMCf+S0eAM+/G5c/7y+ooA/0raACgBrdP+BJ/6GtAH+C38aiG+L/xUKuhDfEjx5yCDwfF2snrnupBHqCD3zQB5l3+8vr178/zz/Lk45AEH+8ufr75659evr+tAB6/MvII/M59f/1e/WgAPf5hz79eCPw4PvmgA9PmXjPf2x68+2eaAD/gQ7d+nGPx/TPXtggCHOR9c9Dx0/PHT1/E0AHX/wAez27YyfT8enXocUAHU89xk8YHJz159OvOelACnJOee+eMdRjnrjp36de+KADnP69Cf4sn/wCv+XXmgBP/AK/bP8Q7duAOPQD1oAXr9f8AE5bA/ix3Hboc9aAE/mevUknDdfQ/57GgBOTjqfz9T+fXr70AO7de3PB7579up+bv+ByAf6+//Brn/wAoKf2Gv+7mf/Ww/wBoL/OO1AH7+0AFAH8BP/B73+0PeJB+wz+ydpt4VsLib4mftD+M9P8ANJE15Zppvw2+Gl55IICm2hvvizB5sgYv9r2QlNk/mAH+f7QAUAf6Z/8AwZZ/s7aX4F/YJ/aC/aRuLPy/Fn7QH7Qsng+O8aIDz/h98EPCum2/h1Ypm+c48afED4kpcImIz5FsSWkQiMA/spoAKACgAoAKAP8AJb/4OzPhHpXwv/4LNfF/X9Jg+yxfG34U/BT4u3lukax266rJ4PX4a6ncW6rwf7RvPhrLql5Ifmm1O9vpWyzkkA/mroAcjvG6yRuySIy
ujoxV0dSGV1ZSGVlYAqwIIIBBzQB/urfsQ/FnVvj3+xf+yL8c9enW5174zfsxfAT4q65cKSwm1n4hfCvwp4t1WTJ5y1/q9wWDfMGyGAYEUAfUFAH+Ld/wXc/5TDf8FD/+zl/HH/oVpQB+TFABQB/oa/8ABmN/wTp0/RvAvxc/4KX/ABE0O0udf8ZX+rfAf9nR762hnn0bwpoVxBJ8X/HemtKsy29z4m8RJp/w+03ULZrTU7Gy8JeO9NkMml+JnWYA/vDoAKACgAoAKAP5av8Ag8E/5Q9ap/2cv8D/AP0HxhQB/lOUAFAH9C//AAa4/s96T+0B/wAFlv2cZvEWlrq/h74F6N8Qv2hL+0kQNFFq3w/8NTWHgHVJCVby20P4n+JfBGt2zjB+2afbJuG+gD/XhoAKACgAoAKAP5p/+Dr/APZZsf2h/wDgkT8TfH9vaxyeMf2U/HXgT49+G51j/wBKl0hdU/4Vx4+0z7Qqs8dgfB3jzUvEt1Af3Nze+FNLaTD28MkYB/kv0AFAH9Ln/Bpd8cP+FRf8FlfhV4Unu/senftDfCT40fBS/kd9lu8ieFl+MGi282TgveeI/hHo9haDBZ728t4lx5hNAH+s5QAUAf4N/wC0h/ycR8ev+y0fFL/1ONdoA8XoAKAP9oz/AIIU/sxaV+yX/wAEm/2JfhlZ2q2+teIvgv4b+NHjmVofKvZ/HXx2g/4W14httRbG6e58Oy+LYPCEEr5I03w7YQJ+6hjAAP1soAKACgAoA53xf4W0bxz4S8UeCfEdqL3w94x8O634W16ybGLvRvEOm3Ok6pancrLi4sbueI7lYYflSOCAf4IXiLR5/DviDXfD9ySbnQtZ1PR7gldpM+mXs9lKSuTtJkgYlcnB4yaAMagD60/YG16bwr+3X+xZ4nt932jw5+1p+zlr0Gw4fztH+MPg7UItp7Nvt12nPBoA/wB0SgAoA/xDP+Crn7Q95+1Z/wAFJf21/jzc3hv7Hxt+0P8AEW18LXBlM5/4V/4M1mXwF8NrfzSSJPsfw/8AC/hqz3JtiPkfukSLYigH59UAdP4J8Jat4+8Z+EfAugRefrvjXxPoHhLRYMM3nat4j1W00fTotq5ZvMvLyFMKCxzgc0Af7v3wN+EXhX9n/wCCvwi+BPgWD7N4L+DPwz8C/CvwnCY0iZPDvgDwxpnhXRzKkeVEz2GlQPOQWLzM7szMxYgHqdABQAUAFAHyt+3P8I9K+Pv7Fv7WfwU1qD7Rp/xT/Zx+NHgZwI1klgufEXw88Q6dp9/aq3AvtN1Ge11Gwk4aK9tbeVSGQGgD/CwoAKAP6X/+DSf4uax8OP8AgtB8IfBunXPkad8fPhF8evhT4iiZ2VLrTNG+HWp/G6zjK52NIuv/AAe0Z4y/PDqh3PggH+szQAUAFAH+XR/wdSf8Ecf+GLP2hW/bc+AfhX7H+y7+0/4rvZfGei6NZ+VpHwZ+P+qLd6xrWjJbwIIdM8HfFFINS8XeEY4yLPS9ft/GHhmCDStKtPClneAH8j1ABQB/VX/wTg/4OXfi1+wz/wAEuPj7+xld2et+KfjX4Zsv7J/YZ+ItwsOo6f8ADXSfiDeXsPjew8VyX0rM9l8J5bi78dfCi0e01iDUNa1c+EdVisvCej6bbxgH8seqapqeuanqOta1qN9q+s6xf3mqatq2qXdxf6nqmp6hcSXd/qOo311JLdXt9e3U0tzd3dzLJcXNxLJNNI8jsxAKFAH6jf8ABIP/AIJmfEX/AIKqftm+Bf2d/C/9p6F8N9MMXjj4/wDxJs7cPD8O/hHo99ax65eW080U1o3izxNNPb+FPA2nzR3C3PiPVba+vLY6DpWuXlkAf7Nfwf8AhH8OvgJ8LPh98FfhH4V0zwR8Mvhb4S0TwP4G8KaRGY7HRPDnh6xh0/TrRGdnnup/JhE19qF5LPqGp30tzqOoXNzfXVxcSAHo9ABQAUAfxI/8Huv/ACaN+xV/2cd4y/8AVZXtAH+brQAUAf6
MX/Bj/wD8m9ft6f8AZZvg9/6hHiqgD+5agAoAKACgAoAKACgAoAKACgAoAKACgD/MG/4PVf8AlKb8A+uf+GAfhbjgn/m4r9qrPRhQB/ILznoc/T3/AN73P+OMigA59D1zyOc9O7/5HNACZ46HAz2P4/x+/wDP3oAXnrg/iPw/vZ79yfbAoAQ5PGD7cfX1Yj1/D8KADJ9+noffr8/1xnn0oAD7g/kfb0f1x+PvmgBcn3/EH39X/wA8e1ACZPv07Dtz/t/XHr1oA/pJ/wCDUbRNG8Q/8FjPhFpev6RpWuadP8I/2hGl0/WdNstVspHi+Gt48TvaahDc2zPG3zIzRsRyM4ZgQD/Ve/4Uv8Iv+iX/AA6/8IXwl/8AKagA/wCFL/CL/ol/w6/8IXwl/wDKagA/4Uv8Iv8Aol/w6/8ACF8Jf/KagA/4Uv8ACL/ol/w6/wDCF8Jf/KagA/4Uv8Iv+iX/AA6/8IXwl/8AKagA/wCFL/CL/ol/w6/8IXwl/wDKagDf8PeAfA/hK6nvfC/g/wALeHby6txa3N3oPhzRdGubi2EgmFvPPpljaSzQCVVkEMjtGJFV9u8BqAOuoAKACgDzWT4NfCSV3kk+GPw8eSR3kkd/A3hR3kkkdpJJHdtHZnd3Zmd2JZmJZiWJJAGf8KX+EX/RL/h1/wCEL4S/+U1AB/wpf4Rf9Ev+HX/hC+Ev/lNQAf8ACl/hF/0S/wCHX/hC+Ev/AJTUAH/Cl/hF/wBEv+HX/hC+Ev8A5TUAH/Cl/hF/0S/4df8AhC+Ev/lNQB8zftqfCL4V2H7HX7WF5Z/Db4f213bfs0fHua3uYPBPhaGeCaP4T+L3jlhmi0hJIpEcBldGVgR1wSCAf4esn3l/3IT/AOQIj07/AOec8EAj/wADng9x3+b6fj1wTmgA9c56env/AL3+A9O+QAz169DnIPp3+Y0AL379SO/rz/Fnr36dfpQAn5/kc8kHs2c9M5OfU8gUAH4Hv2Prz/F+eOfXvkAPzz16cng+h+vX5u5PBwAHT14z0B9eejc4/wD180AL2/Dnr/tf7XT+ee3GAD/X2/4Nc/8AlBT+w1/3c17f83h/tBe5/nQB+/tABQB/lTf8HhfxEn8Z/wDBX648LvdtNB8JP2ZPgr4Eht9+Y7NtXuvGnxQmQIOFkmPxESeRiN7o8QZiiRhQD+WCgAoA/dz9iH/g4s/4KMf8E+v2cPBX7LH7Ot58DtO+FngS/wDFuqaQPFfwpj8R+JLq/wDGnivWPGGs3Wr622v2Rv5f7S1qe1sz9li+zaXbWNn8/wBm81wD6z/4jAP+CxP/AEHf2bv/AAx0P/zU0AH/ABGAf8Fif+g7+zd/4Y6H/wCamgA/4jAP+CxP/Qd/Zu/8MdD/APNTQAf8RgH/AAWJ/wCg7+zd/wCGOh/+amgA/wCIwD/gsT/0Hf2bv/DHQ/8AzU0Afi9/wUN/4KLftGf8FOvjfoP7Qf7T83ga4+Inh34aaF8KbGfwD4UXwfpMvhXw74j8XeKNNN5pq3+oi41RdR8a6xHLf+ehlsksLbyl+y75AD4RoAKAP9p3/ghl4obxf/wSA/4J26szs5tP2XPht4Xy2chfBGmt4LRef4UTQFRe21RjigD9W6AP8W7/AILuf8phv+Ch/wD2cv44/wDQrSgD8mKACgD/AGx/+CNHwO0f9nX/AIJV/sD/AAt0aEQLbfszfDPxvrSCIQg+Mfi5okXxc8dybBkkS+NPHGvyK7/vJFYPIFdmUAH6Y0AFABQAUAFAH8tX/B4J/wAoetU/7OX+B/8A6D4woA/ynKACgD+0r/gyV8L2t5+3R+1x4zktY5Lzw9+yfb+Gra8ZcyW8Hi74veA9TuoI2/hF03gy2d+7fZVGcBsgH+ldQAUAFABQAUAfF/8AwUd+Gtl8Yv8Agn3+298Lr9FaLx1+yd+0D4egkZPMNpqV98K/FK6RqMaYO6fTNWFlqNvkMPPtYyVYZBAP8NegAoA/TX/gi94ml8Jf8Fa
v+CcWqw3D2z3X7ZPwC8MtJG7Rs0XjT4h6H4OngLKQTHdQa9JbSoTtkimeNwyuVIB/th0AFAH+Df8AtIf8nEfHr/stHxS/9TjXaAPF6AOj8H+HZ/F/i7wt4TtpfJufFHiPRPDtvNsMnlT63qdrpsUvlgqZPLkuVfYGUvjbkZzQB/vceFvDmleDvDPh3wjoNuLTQ/CuhaR4c0a1GMW2laHp9vpmnW42hVxDZ2sMYwqj5eABxQBu0AFABQAUAFAH+DN+0GiR/Hz43xxqqRx/F/4lIiKAFRF8Z60qqoHAVQAABwAKAPIKAPpP9jP/AJPA/ZS/7OT+Bn/q0PC9AH+7LQB49+0N4/b4UfAH45fFNJxbP8Nfg98TPH6XJ24t28HeC9b8RLOdwK4iOnCQ7gV+XkEZoA/wa5JJJpJJppHllld5JZZHaSSSR2LPJI7Es7uxLO7EszEkkkk0AMoA9D+EXxN8RfBX4r/DD4yeEIdIuPFvwl+Ifgv4m+F7fX7D+1dCn8ReA/Emm+KdEh1vSzNb/wBpaRLqelWyalYGeH7ZZtNb+dH5m8AH9Jn/ABGAf8Fif+g7+zd/4Y6H/wCamgA/4jAP+CxP/Qd/Zu/8MdD/APNTQAf8RgH/AAWJ/wCg7+zd/wCGOh/+amgA/wCIwD/gsT/0Hf2bv/DHQ/8AzU0AH/EYB/wWJ/6Dv7N3/hjof/mpoAguv+Dvb/gsFe21zZ3Wsfs2TW13BNbXML/A2EpLBcRtFNE4/wCEp5WSN2Vh3BNAH8u9ABQB+0X/AAbu+KG8If8ABab9gDVldkN38XNa8LkrnJXxv8NPHXgt1OP4ZE19kbttY54zQB/srUAFABQB8+ftV/sxfCH9sz9nn4r/ALMfx28Op4m+F/xf8KXvhfxFZjyk1DT5JGju9F8TaBdzQzrpvinwlrtrpvibwvqohlbTNe0rT73ypRCY3AP8Wv8A4KL/ALB3xd/4Jt/tcfFL9lH4xW73Gp+CtSGoeC/GMNnLZ6N8TfhlrUtxP4I+Ivh8SNKn2HxBpsTR6jZRXN23h/xNY6/4WvbmTU9CvgoB8PUAFABQB0ng3wf4q+Ifi7wv4B8DeH9W8WeNfG3iHRvCXhHwvoVnNqOt+I/E3iLUbfSNC0PSLC3V573U9W1O7trGxtYUaW4uZ44kBZhQB/sY/wDBC/8A4JQeFf8AglD+xloPw61O10rU/wBpH4rjSviB+0143sfJuhf+N3sXXS/h/oupqGkufBXwtsL258P6CVl+yarrFx4o8Yw21jL4suLK3AP2joAKACgAoA/iR/4Pdf8Ak0b9ir/s47xl/wCqyvaAP83WgAoA/wBGL/gx/wD+Tev29P8Ass3we/8AUI8VUAf3LUAFABQAUAFABQAUAFABQAUAFABQAUAf5g//AAeqf8pTfgF6/wDDAPwtx9f+Giv2qf8AZNAH8gfGe3frjoCf9nHPX370AL9T/wCg9Mdfu/h9PagBOMDnse4zjn/Zz6//AF6AD8vzXOMf7vTH6UAHHrxkemM8/wCzjOOv8+lABxnqOnPI/D+HHp/9fFAAceoP4j29F6/59aAHEDue567fx/h6+tACcc89hnpj2z8uPx/+tQB/S3/waXf8pmvg9g/80i/aH/8AVZ3npQB/rMUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfLv7b/wDyZj+1v/2bH8fv/VS+L6AP8LGT7y/7kH/omL/P/wBfFAEf/wBlnJ5zt78fn155PPFACeuc9O59x6A8enUdcUAL35z37j+77D0x6/iaAAnk9+vpk/MPbI5z+PQ96AEyeev6Zzkd8dfXv070AH/18cjrnnHy9Qf/AK3rQAuRjP59PRvYf1/LqAJnpxn/AL5/vH2P6dz78gDu3OenPPb5vbp6de34gH+vt/wa5/8AKCn9hv6/tNf+th/tBfSgD9/aACgD/IT/AODpjUHvf+C537aEDFiulWP7Nmnx5yPlb9lL4Iai2AecedqEvPQnLDI
IJAP586ACgAoAKACgAoAKACgAoAKACgD/AGaP+DfP/lDH/wAE+/8AsiJ/9TTxZQB+yFAH+Ld/wXc/5TDf8FD/APs5fxx/6FaUAfkxQAUAf7x37NmmWmi/s6fALRrBPKsdJ+Cvws0yzj/552lh4G0K1t04AHyQxIvAHTpQB7VQAUAFABQAUAfy1f8AB4J/yh61T/s5f4H/APoPjCgD/KcoAKAP7mv+DH4W/wDw0B+3wzBPtY+DvwZEJP8ArPs7eNfGBugv+wZVtN/+0I6AP9F6gAoAKACgAoA81+M2n22rfB/4r6VeOkdpqfw18dafdSSf6tLa98L6pbTu/X5FilZm4PANAH+CjQAUAfZH/BOrVbrQv+Cgv7CmuWSSSXmjftkfsxaraRxcyyXWnfGzwReQJH/00eWFFT/aIoA/3MKACgD/AAb/ANpD/k4j49f9lo+KX/qca7QB4vQB9E/sg6QniD9rP9l7QZACmt/tE/BPSHDfdKal8SvDNmwbPGCJjnPagD/dvoAKACgAoAKACgD/AAaP2h/+S/8Axy/7LD8TP/U11ugDx6gD6T/Yz/5PA/ZS/wCzk/gZ/wCrQ8L0Af7stAHwd/wVO1B9J/4Ji/8ABRnVIywk079hH9ru9jK5Lebbfs//ABBmjxjODvReeg6sQATQB/h8UAFABQAUAFABQAUAFABQAUAFAH62/wDBBz/lMT/wTy/7OR8H/wDonUKAP9ougAoAKACgD+QH/g8Z+AH7J/iv9gXwd+0J8UvEdh4I/ac+F/xB0bwh+zldWNlDd+I/iva+Mb+CTx98KNSgSWG4m8KaX4dtL/4krrtyZIfCOr+GktLJ45fG17p2uAH+YLQAUAFAH9j3/Bmt8Av2Tvid+2v8Xfip8WPEdhqn7S3wL8A2HiD9m/4Va3ZQjT3sfEE1/oHxF+MOkXE0rpq/inwHa3Wi+G7DTDAh0G18dXPieBLy/trG/wDDgB/ptUAFABQAUAFAH8SP/B7r/wAmjfsVf9nHeMv/AFWV7QB/m60AFAH+jF/wY/8A/JvX7en/AGWb4Pf+oR4qoA/uWoAKACgAoAKACgAoAKACgAoAKACgAoA/zBv+D1U/8bTfgF15/YB+FnT/ALOK/ap9++fUfU0AfyC857//AK2+vt16YP4EAUZ756jr+fqep469e+OKAEySM898/hzz+Xp3577gABPHX6889+ee/wBT17cggASQf6fUk+/Xp0z9KADnOOehP55/2v6n8OtAAxI9e/T8B6+/+fvUAKPrnn1Pv/tHrnP9PQAOvftnrzzn3HI7/pigD+lv/g0v/wCUzfwe/wCyRftD/wDqs7ygD/WXoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA+Xf23/+TMf2t/8As2P4/f8AqpfF9AH+FhL95P8Ach/9ERc/z/WgCPJ4OTxnn14BHfv7559RwAA+737evv06npn3/rQApOD1PGfXnjPc+/Gc89cjigAOc8k+mef72PX2yeenqeaAE/4Efrn1Iweo6j69D3HAAp+pHXnPTBx6/wBfxY0AHPqeOc5PcHnt9OpHPXrkAbyOpI659snvz36/49aAHZwOvT69eeP078dOOuQD/X3/AODXPn/ghT+w1/3cz/62H+0F/n+p60Afv7QAUAf5Gn/B1n4dudE/4LfftSanPAYovF/hD9nPxFZSEcXNtbfs9fDXwk86+oW88L3dtnn5rdh2oA/nSoAKAP7tv+CSn/Brf+wt/wAFDf8Agnl+zd+2J46/aA/ao8LeMvjHovjefxP4e8D618I4fCmla14J+KXjn4b39vosOvfCjXNXitTN4PaZkv8AVr6ZZpZf33l7FUA/Rn/iCd/4J1/9HP8A7af/AIP/AIG//OToAP8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AD/iCd/4J1/9HP8A7af/AIP
/AIG//OToAP8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AP6i/wBi/wDZV8E/sQ/sufBj9lD4ca/4q8U+B/gj4T/4RDw54g8bTaTceK9UsP7U1HVvtOtzaFpWh6RJd+fqc0ebHSbGHykjHlbw7uAfT1AH+Ld/wXc/5TDf8FD/APs5fxx/6FaUAfkxQAUAf7z3wC/5IT8Ff+yS/Dj/ANQ7RqAPWqACgAoAKACgD+Wr/g8E/wCUPWqf9nL/AAP/APQfGFAH+U5QAUAf2uf8GR+trB+2j+2P4cMqh9V/Zf0LW1hLANIvh/4reGrB5QmcssJ8TIrMAQpnUEguMgH+k5QAUAFABQAUAfP37WniSDwb+yt+0x4vurmOztfCv7P3xm8SXN3K4jitYND+HPiTVJrmSQkBI4I7VpXckBVUsTxQB/hEUAFAH3//AMEoPD3/AAlX/BUL/gnRoLR+bDf/ALcX7K32xMbt2n2vxv8ABF7qXHfFhb3J544yeM0Af7fNABQB/g3/ALSH/JxHx6/7LR8Uv/U412gDxegD6l/Ya/5PY/Y9/wCzpf2ff/Vs+EqAP91OgAoAKACgAoAKAP8ABo/aH/5L/wDHL/ssPxM/9TXW6APHqAPpP9jP/k8D9lL/ALOT+Bn/AKtDwvQB/uy0AfF//BSDw7c+L/8Agnh+3p4SsoDc3nij9i/9qPw7aWyjLXFzrfwP8c6bBAo5yZZblIwO5agD/DXoAKAPrT9gv4GeAv2nv21/2Uv2bvifrniTwz4D+Pnx++Fnwb8Ra/4Pn0u28T6TbfErxhpXg+3vdEn1vTNa0mPUIr/WLUwnUNKvrYkkPA+RgA/0G/8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AD/iCd/4J1/9HP8A7af/AIP/AIG//OToAP8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AD/iCd/4J1/9HP8A7af/AIP/AIG//OToA+kf2P8A/g04/Yd/Yy/ac+Cn7U/w+/aC/at8TeNfgb450zx94b0DxlrXwhn8L6rqmlrOsFprkOifCbRtWksJPPYyrYarY3BwNlwnOQD+pigAoAKAOa8Z+MvCvw78IeKfH/jvxDpPhLwT4I8O614t8X+KdevYdO0Tw54Z8O6dc6vruu6vf3DJBZabpWmWl1fX11M6xwW0EkjkKpoA/wAc3/gud/wVe8Vf8FX/ANs3X/iRp1zq2l/s4fCo6r8P/wBmXwPf+dbNp/gZL5G1Px9rWmMRHbeNvilf2dt4h18NGbrS9Jt/C/g6a6v4fCdtezgH4vUAFABQB9A/srftOfF79jX9oT4U/tN/AnxHJ4Y+KPwg8V2Xinw3fHzZLC/SNZLTWfDev2kU0Dan4X8WaHdaj4a8UaS00SapoGq6hYtJH5/mKAf7SH/BOL9vb4Rf8FKP2Rvhf+1b8H7hLax8YaedL8deC5byK81n4YfFDRYbaLxv8O9eZFikN3oWoTpcaVfzW1mPEPhfUfD/AIptLWLT9dswQD7noAKACgAoA/iR/wCD3X/k0b9ir/s47xl/6rK9oA/zdaACgD/Ri/4Mf/8Ak3r9vT/ss3we/wDUI8VUAf3LUAFABQAUAFABQAUAFABQAUAFABQAUAf5g/8Aweqf8pTfgFxn/jAH4W98f83FftU/r/WgD+QTjPQenUY5J/Xj69c80ALx6d8c4/u59/5/pxQAnG0cdd3/ALNxnr6/4+oADGQMDnvkHqCcfTt9MevIAEgDpnn+np/Q+o+gAF4z07Z6jPU+/wD+rj0GABGx6evt3H9T1/oc0AO69RQAg+nZT/P6c0Afoz/
wSv8A+Chmtf8ABL/9sDwn+1r4f+FulfGDUvCvhL4g+FYvBGs+Kr3wbY3iePPDk3h2W+fXNP0bXrmF9NWT7UkC6dILrDQma2YrOgB/Un/xHAfF/wD6R6/Dn/xInxb/APOvoAP+I4D4v/8ASPX4c/8AiRPi3/519AB/xHAfF7/pHt8Of/EifFv/AM6+gA/4jgPi/wD9I9fhz/4kT4t/+dfQAf8AEcB8X/8ApHr8Of8AxInxb/8AOvoAP+I4D4v/APSPX4c/+JE+Lf8A519AH7g/8ELP+DhDxz/wV++P/wAYPgx4p/Zj8KfA6y+GPwcT4oW2v6D8Udb8d3Wr3L+OfDvhH+x57DU/B/h2G0h8vXZL37XHdTyh7VIPJZZ2eEA/qFoAKAEJxz7gfmQP60Af58Xjf/g9f+LnhLxn4t8Kp/wT/wDh1fJ4a8T+IdAjvG/aE8WQPdR6LrN9paXLwr8MXWJ5xaea0au6qWwGxwADl/8AiOA+L/8A0j1+HP8A4kT4t/8AnX0AH/EcB8X/APpHr8Of/EifFv8A86+gA/4jgPi//wBI9fhz/wCJE+Lf/nX0AH/EcB8X/wDpHr8Of/EifFv/AM6+gA/4jgPi/wD9I9fhz/4kT4t/+dfQB5v8ZP8Ag86+Kvxg+EPxV+Et7+wR8PtFtPij8NfHnw6uNag+P/iq+n0eLxx4U1fwvJqsNlL8NII7yTTl1VrxbR7i3W6MPkfabYuLiMA/iaZixU9OEX1+7HGoP44yR69+M0AN/wDss+vTn+fvzySaAA+/90fjkjpyePw/AZoACDnrnGc5P+zn9R9emaAF5z29ef8Ae+vTIzj37nmgA5/X9QwPr15weecdT1oATGMdzzj22nJ/PHt7nvQAeo4HY+/Dc9sd+T/+sAOTjGORnjjoT7+p/H6igBecf8BBP0+b3+vr1HAxQB/r7/8ABrn/AMoKf2G/r+01/wCth/tBUAfv7QAUAf5j3/B6b8G7vwj/AMFE/gF8Z4YGTRPjL+yzo+iPOUwJ/F3wq+IPjWy1wLIMBxD4Z8XeA1KHMkbMSzFJIlUA/jooAKAP9T7/AIM7fj/p/wAUP+CUl78HmmjTXv2Zv2gPiX4MmsPM3zt4Y+Isth8X9C1pk3HyrfUNe8aeNNKgX5S03h67cr8wdwD+rqgAoAKACgAoAKACgAoAKACgD/Fu/wCC7n/KYb/gof8A9nL+OP8A0K0oA/JigAoA/wB574Bf8kJ+Cv8A2SX4cf8AqHaNQB61QAUAFABQAUAfy1f8Hgn/ACh61T/s5f4H/wDoPjCgD/KcoAKAP6e/+DRL4uRfDX/gsf4K8JzXsdpH8ePgJ8b/AITBJZViS8n07SNJ+M9taDewV5pJvhErQJy7yIEjBdgCAf6vtABQAUAFABQB+NX/AAcGfHCx+Af/AARy/bw8UXOpx6de+M/gxf8AwS0ZDJsudS1H476rpnwkm0+xRf3k0/8AY/i/Vr6dYwTDp1jfXkhSC2mkQA/xnKACgD9u/wDg3I+Dt58av+Cz/wCw3ocEcn2LwV8Q9e+MWsXaLujsbP4PeA/FXxDsZLg4bbHfeINA0XRY2x/x9apbjcgYyKAf7HtABQB/g3/tIf8AJxHx6/7LR8Uv/U412gDxegD6l/Ya/wCT2P2Pf+zpf2ff/Vs+EqAP91OgAoAKACgAoAKAP8Gj9of/AJL/APHL/ssPxM/9TXW6APHqAPpP9jP/AJPA/ZS/7OT+Bn/q0PC9AH+7LQBz/izw3pvjLwr4m8Iayhk0jxX4f1nw3qsa7S0mm67p1zpd8i7wy5a2upVG5WXJ5BGRQB/gw/FD4f658J/iX8RPhZ4nj8nxL8NPHXi34f8AiGLY0fla54N1/UPDurR+W5Lx7L/TbhdjEsuNrEkGgDhaAPYf2efizefAT4//AAN+Omn2rX1/8F/jD8M/izY2SP5b3l58OfGuieMLa1WTcmxribR0iV967S+7cMZoA/3efC3ibRPGvhjw54x8M6hDq3h
vxboOkeJvD+qW5LW+paJr2n2+q6TqEBPJhvLC7t7mInkpIpoA3qACgAoAKACgAoAKACgAoAKACgD/AD/v+Dun/gsh5j3H/BKj9nXxV8kZ0bxD+2P4r0K94eQfZdc8H/AGG8t36Rn+zvGvxNjhJzJ/wiXhOS7DReNtEIB/ARQAUAfs5+x5/wAEQP2tv20P+Cf/AO1N+358M9LkXwj8AGj/AOEB8Cy6Rd3PiX9oCPwvv1H4zv8AD8pLGzj4Y+Gmgv4PKtNR/wCEz1+PVPBfh/zfEek3logB+MdABQB/Qz/wbq/8Fgr/AP4Jdftc2/hn4oa5dj9j39o3UNE8I/HGwmkmnsvh5raTPZ+D/jjplmu9op/B897LYeNo7NGm1jwDfaq5stV1vw/4VhtgD/XU0/ULDVrCy1XSr201PTNTtLbUNO1LT7mG8sNQsLyFLmzvbK8t3kt7u0u7eSOe2uYJJIZ4ZElid0dWIBboAKACgD+JH/g91/5NG/Yq/wCzjvGX/qsr2gD/ADdaACgD/Ri/4Mf/APk3r9vT/ss3we/9QjxVQB/ctQAUAFABQAUAFABQAUAFABQAUAFABQB/mD/8Hqn/AClN+AXXn9gH4W98f83FftU/56igD+QTvjn/AL69yM/XuT+FAC9+/X19R/hz1+hzxQA0dM89+/sf1P0HT8wBe4PPPvnHB75OfX/OCAB/4FyR3Oe+eOvbP4+2KADvj5umfve59+/1/LrQAHj+8evVsdx69ucf5zQAuM/3h+Pr36n/ACaAEH49B0P19x1/r2OaADvjnv0Pu3+e/Tv3ADqe/Ufxeq59f17+vagAXnHXkHq3v/n0/HqAAHI/i+uc9gfXv9O/4UAHcDnqe+T0/pn19+c0AKeM9e3fHXHf8ufc8+gA059/4uSSenGf1/8A1k8gH9o//Bk3/wAn0/tZf9mlQ/8Aq6vANAH+lbQAUANbp/wJP/Q1oA/wWvjT/wAlf+Kn/ZSPHffH/M3a1/n9e2aAPMuc9e59fV/f+v8A9cABnA/DnJ7t6Z578nH9aADnn6N/M54z+vJ56HFABz83J4wep44J9e/6Z70AHOR6jPXnPHXr+OOPxzQAvPHXnHc8/KT6+v8AnmgBuDx04Pv/ALPr36enXr1wAHr/AMC756qD1zzxx+vNACnIz9OuSe4569+Ppj3oAU9/x6k8fLnoTznmgBOc/n69d3Xr644zk/jmgA5zyP1PqO5x0455z79gBcHjqfvcZPY465460AJjGffnB/4F3+mefx57gAM/Xr3I7n+Rz1xnPfGaADn9PXn+Lv69fpnpxQB/r7/8Guf/ACgp/Yb+v7TXX/s8P9oKgD9/aACgD+Qf/g8n/ZIuvjN/wTy+Gf7Tnh3R5NR8SfsifF+C48Q3UMJll0/4RfGyHT/BXiy4/dgzFI/iHpXwgkmODDb2S393MY44WkAB/l/0AFAH9T//AAaa/wDBRSw/Y8/4KCXX7PHxF8Q2mh/Bj9tvS9H+HNzfardxWel6D8b/AAvNqd/8F9XuLq4fZAniK41nxP8ADJYIlT7brXjrw3c3kyW2kAqAf6rNABQAUAFABQAUAFABQAUAFAH+Ld/wXc/5TDf8FD/+zl/HH/oVpQB+TFABQB/vPfAL/khPwV/7JL8OP/UO0agD1qgAoAKACgAoA/lq/wCDwT/lD1qn/Zy/wP8A/QfGFAH+U5QAUAfTn7Fv7R2t/sg/tb/s4ftP6B9qkvvgZ8ZfAHxHubG0cJNrWheHfEVjd+KPDbEvHm38T+Ghq3h68TzI/MtNTnTzYy29QD/cn+G3xF8FfF/4eeBviv8ADfxDYeLfh98SvCPh3x54H8UaXIZdO8Q+E/Fmk2mu+H9ZsnZVc2+o6XfWt1GJESVFlCSokisgAO1oAKACgAoA/gD/AOD1D9vLQby1/Z4/4Jz+Cta+2a5petRftK/HO3sp18rSP+JPrfhL4OeF9ReBmLX1/aa1448X6no135TWlkPAmteTOup2E8AB/AH
QAUAf2of8GUn7Omp+Lf2z/wBp/wDafvdJlm8LfBj4A2vwv0zVJowttB4++M3jPRtVtTYyuAZ7608HfDLxZa3qQFxZ2mv25vBGb+xMoB/pUUAFAH+Df+0h/wAnEfHr/stHxS/9TjXaAPF6APqX9hr/AJPY/Y9/7Ol/Z9/9Wz4SoA/3U6ACgAoAKACgAoA/waP2h/8Akv8A8cv+yw/Ez/1NdboA8eoA+k/2M/8Ak8D9lL/s5P4Gf+rQ8L0Af7stABQB/kFf8HN/7JF1+yj/AMFev2i7qx0eTTfAn7SkulftQeBbnySlvqL/ABSW4b4lyI6DyPOi+Muk/ETfAjebFZy6fNMiC6jZwD+fygAoA/1nP+DWf/gopYftq/8ABN3wd8HvF3iG0u/jv+xfDpPwO8X6XNdxHWdW+Fmm2AT4H+ODZ72uDp03g+0Pw+uL+ZpJ73xF8PNa1C6MZ1K2EgB/S5QAUAFABQAUAFABQAUAFABQB+K//BdX/grD4X/4JQ/sZ678QdJu9K1L9pT4tLq3gD9mXwTfCG6F340NjG2r/EPWtMcs9z4L+FljfWuv62rxG01bW7rwr4PnuLFvFcV9bAH+Ol4w8X+KPiD4t8T+PPHGv6t4r8aeNfEOs+LPFvijXr2fUtb8R+JfEOo3Gr67rusahcvJcX2p6tqd3dX9/dzu8txdTyyyMXcmgDnKAPvb/gmh+wF8VP8Agpf+2J8K/wBlP4WpPYDxXqB1v4j+NxZvead8MPhNoM9rN468f6qmUhb+y7CeLT9Bsbm4tItf8Yat4c8Mpd29zrUEqgH+01+zr+z98K/2Vfgb8Lv2dPgl4ag8JfCv4P8Ag/SvBXg3Q4Skksem6ZERNqOqXaxxNqniDXb+S813xLrVwn2zXPEGpalrF88l5fTyMAf5e/8Awc+/8Egf+HfH7Vp/aO+Cvhf+zv2R/wBq3xBq+t6DZaVaeVovwi+NEwn1rxt8LdlugtNK0DXQbvxv8NbQC0gTRX8R+FdKsvsfgGS6uAD+XegAoA/0g/8Ag0o/4LJf8Lo+Hdr/AMExP2iPFXnfFf4Q+HrrUf2WfEmt3m678ffB/Q7drnVvhO9xdP5l54l+Etmj6h4Ut0lmnvfhek9hbWlpY/Dae4vgD+3WgAoAKAP4pf8Ag9w0q5m/Ys/Y51xR/oenftRa3pU5weLnWfhR4ovLUZ6DMWhXhweTjI6GgD/NhoAKAP8ARH/4MeNdtLj4O/8ABQnwyjob/SfiX+z/AK7cRhwZFtPEPhf4n6fZu0edyo03hi/VHIw7I6g5RqAP7saACgAoAKACgAoAKACgAoAKACgAoAKAP8yf/g9c8Mazbf8ABST9mvxnNZzJ4f1z9h/wj4a02/MbiC51fwl8evj5qeu2cUpAjeays/G/hyaaNGLxJqEDSKFljLAH8cfp9PVuvzY5z+pPTPOKAF/+v1Jz933IP/1ueM5IAg6fge5x/F2z/TPU84yAA7n1xzy390nnJ9enfH0zQAHpz6jqTjp9fU9zjHPvQAdz+Hds/jz9cdz6c8gB/PHqx5z7E8f1+nAA5v8AHqSO/wBR78/T2yAJ6/RehP8Aj+X9c8gCenryep/2u+cfrnrzQAuOevcdzn7p69s/5PWgBOg4PY9yB97tkg/59+QAxwT9O5x0HUf596AFPb/gXUn09c8e+TkH0oAD1x7+/qv+P+eSQBPT3DZ5P+P58+/PWgD+0f8A4Mm/+T6f2sv+zSoff/mtXgGgD/StoAKAGt0/4En/AKGtAH+C38af+Sv/ABU/7KR469f+hu1r0/T396APMv4h079+erdvx649efUAAOB26dCcfe/HP/1+e1AB69+H9fX/AOvz/OgBD/Fz6dz39f6dfT2oAUfw/j3Pp2yc4/OgAA6fh3PPDf57dx7UAIO3Tr6+6+pz/wDqHrggB6/8C5yT2HX+vXn0PFAARweeoByST3/Hj0/Hk80AKR1/Hux/h7//AF/50AHf8+5zy3p/9bGepIoAMdf
qRnJz1X36469ume4oAMcjnqW7nPXt/XP60AIMZP1/2s9G9ec/560AKO2fQ9CT39ic+/XtntQAoUkhR1baMdfvbgMn0yeue1AH+wp/wbL+GdY8J/8ABDr9hHS9cs5rG9u9A+N3ieGGeN43fR/Gv7TXxp8ZeHrwLIqsYtQ0DXtMv4JANk0FzHLEWjdWIB+71ABQB5B+0D8D/AX7S/wN+Lv7PfxR09tU+Hnxp+HXi74Z+MLSLyluxofjHRLzRLy702aaKZbTWNOS8/tHRtQEbS6dqtrZ38G2e3jYAH+If+25+yP8Tf2E/wBqr42/sofFu1ePxj8HPG2o+HP7VW0ms7Dxd4bk2al4N8d6LDOzyDQvHHhS90fxTpCyO00NlqsVtdbLyC4ijAPlagCSKaW3linglkhnhkSaGaJ2jliljYPHLFIhDxyRuA6OrBlYBlIIzQB/pdf8G/n/AAcwfDL9ozwX8Pv2NP2/PHWnfDn9pfwvo9h4T+H3x68ca3aaf4H/AGhrPTIls9HsvF3iTVJLa28K/GZ7GO2s7iTW7s6V8TNSge/0/VbfxfrEXhm9AP7OVZWUMrBlYBlZSCrKRkMCMggg5BBwRzQAtABQAUAFAHwv+2F/wUv/AGEf2CdFn1b9q79pv4Y/Cm/S2N3Z+CLrWW8RfFHWotm9H0H4V+E4dd+IWsQOWjja+svDkunWzzwG9vLaOVZCAfHf/BLD/gt/+z//AMFcPix+0z4J/Zy+GfxM8NeBf2c9G+Guoj4g/E1tC0bUvH1z8QtR8c6fjSvBGjXmvS6HpGnDwU11aX+r+Iv7V1KLVI0vPDuiT2skcoB+1lABQB/i3f8ABdz/AJTDf8FD/wDs5fxx/wChWlAH5MUAFAH+898Av+SE/BX/ALJL8OP/AFDtGoA9aoAKACgAoAKAP5av+DwT/lD1qn/Zy/wP/wDQfGFAH+U5QAUAFAH9z/8AwbGf8HCHgH9n3wp4c/4Jy/tyeM9K8EfCfTbvVG/Zs/aA8U6j9i8PeA5db1S61nUPhN8UNZvZTaaN4OudW1C/1HwN40v5bbTPCtxdXXhnX7m38OSaFdeHwD/RM03UtO1nTrDWNHv7LVdJ1WytdS0vVNNuoL7TtS0++gS5sr+wvbWSW2vLK8tpY7i1ureWSC4gkSWKR43ViAXaACgD8j/+Ctv/AAWF/Zm/4JP/AAN1vxf8RvEmh+K/j/4g8O383wL/AGdLDVVPjT4ja+xkstN1PV7WzFxd+E/hrp2oh5/E3jjVYbaxW0sL/SvD51rxXLpuhXoB/j1/tM/tH/Fz9rz49/FP9pX47eJX8W/Fj4weKrzxb4v1nyRa2n2maOGz07SNIsFZ49L8PeHNGs9O8O+G9Iid4dI0DS9N02FmitUJAPC6ACgD/X8/4Nov2IJf2J/+CUnwSh8TaJJovxV/aSlvP2mviZDe2rW2qWsnxHstNj+Heh3kdwiX1m+jfCjSPBAv9HuxG2l+JL3xGpt4Li4ut4B+/lABQB/g3/tIf8nEfHr/ALLR8Uv/AFONdoA8XoA+pf2Gv+T2P2Pf+zpf2ff/AFbPhKgD/dToAKACgAoAKACgD/Bo/aH/AOS//HL/ALLD8TP/AFNdboA8eoA+k/2M/wDk8D9lL/s5P4Gf+rQ8L0Af7stABQB/KX/wdm/8E2NQ/bE/YW0/9p/4Z6LLqnxp/YibxH44u9P0+1M+peK/gL4hh07/AIW1paJCglubrwUNE0X4lWLXEskdloXh7xva2FtJqGvoGAP8rugAoA+8v+Cbn/BQr43/APBMj9qvwL+1H8D7lL290LzdB8f/AA/1G+ubLwz8WPhrq89s3if4f+JntknaK21FbW11HRtWFpezeGvFGmaH4ltbO6udJjtpgD/Xa/4Jo/8ABV39kb/gqZ8HbD4k/s8+N7O08a2FhA3xN+BHijUdNtPi38K9XAhjurfX/D0Vy02qeGpbmZU0Dx5osd14X1+JhFFeWmtWuraJpYB+llABQAUAFAF
W9vrLTbO61HUby10/T7G3mu72+vbiK1s7O1t0aWe5urmd44be3hjVpJppnSONFZ3YKCaAPwQ/bk/4OWf+CVH7Ef8AbHh6T42j9pb4raZ59v8A8Kx/Zjj034kyW+oRbojba/8AEYanp3wp0A2l4Bb6xYSeNLzxRpm2cjwzdzwG2cA/Wn9jz9oi0/a4/ZW/Z5/agsPCtx4Gsfj/APCDwJ8W7Pwdd6vHr934ZtfHXh+y8QW+i3OtQ6fpUOqXGnxXyW817FptlHPIjOlvGpAoA+kKAPNfjJ8X/hx+z/8ACn4hfG34v+KtN8EfDD4WeEtb8ceOfFWrSFLLRvDvh+xlv9QuSiK9xd3TxxfZ9P02zin1DVdQmtdN062ur+7t7eUA/wAZH/grv/wUx+I//BVL9szx3+0Z4s/tLQ/h5YtJ4J+Afw2u7kSQ/Dj4RaPfXUmg6dPFDLLaP4q8QzXFz4q8c6jDJMt54m1a8tbKddC03RLKyAPy/oAlggmuZoba2hluLi4ljggggjeWaeaVwkUMMSBnklkdlSONFZ3dgqgkgUAf64P/AAbdf8EiIf8AgmZ+x3beOviv4ditP2vf2nNP0Lxp8YmvrdDq3w18JrA954F+CUErrvs5vDdrfSa349jhCG78e6rqGmXFxqmm+E/Dd1EAf0ZUAfH37en7FXwi/wCChP7Kfxc/ZQ+NNlu8KfE3w/Jb6X4it7WG51vwB4205vt/gz4h+GjMyCPXfCOvw2mpww+dFb6tZpfaBqhm0bV9StbgA/xWf2vf2Vfi7+xJ+0j8W/2XPjnoh0T4k/CDxXeeG9X8pZjpeu6eUjv/AA54w8O3E8UMt74X8Z+HbvS/FHhu+khhludG1Wze4gtrnzreIA+bqAPR/g/8XPiN8A/in8P/AI1fCLxVqfgj4nfC3xbonjjwN4r0iUR3+ieI/D19DqGnXaK6vBdQGaEQ32n3kU9hqdjLc6dqNtc2N1cW8gB/s0/8Egf+Cm3w5/4Kq/sZ+B/2h/DH9maF8StKEXgj9oD4a2dwXm+Hfxb0mxtpNatbaCeWW7bwj4nhmg8VeBdQmkuDc+HdUg0+8uTr+ka7aWQB+pFABQB/KR/weOfCfVviD/wSR0jxvpcO+D4FftWfB/4j+IphGXMPhzxB4d+I3wcwWHESS+Kfil4VBduC6xx/ecUAf5X1ABQB/S5/wa3f8FKPAf7AH7f9/wCDvjZ4jsfCPwJ/a18Kab8JfFXi/VpzaaH4I+Iela1/avwm8XeI71nFvY+HRqV/4g8Gavqd4sen6FB43HiPVb7T9G0bUrgAH+szHJHLGksTpLFKiyRyRsHjkjcBkdHUlXR1IZWUkMCCCQaAH0AFABQAUAFABQAUAFABQAUAFABQB+Hv/BdX/gjr4Y/4K7/s0aL4T0XX9F+H37SHwY1HV/FPwD+I2uWc9zoIutbs7a28VfDjxw1jDcapB4G8eQ6do5vNW0mC51Xwt4g0TQPElrYaxZ2OqeH9ZAP8xD49f8EWv+CqX7OXi/UvCHxB/YT/AGktSmsLy4tIPEfwy+GHiT4xeBtXjhneOG80Xxv8L7HxT4dv7e7jCXECNeW1/HDMi3thaTh4UAPB/wDh3V/wUG/6MT/bM/8AEXvjZ/8AMV680AJ/w7p/4KD4x/wwn+2Z3z/xi/8AG3knPf8A4Qn3oAX/AId1f8FB/wDoxP8AbL/8Re+Nn/zFUAIf+CdP/BQbr/wwn+2Z1/6Ne+Nvv/1JXfNAB/w7p/4KDZJ/4YT/AGy+n/Rr3xt9/wDqSvf1oA+aviJ8NviN8I/FmpeAviv4A8bfDDx1oyWT6x4M+IfhTXfBPizSk1KzttT059S8OeJLHTtYsVv9OurbULJrqziF1ZXMF3AZIJY5GAOMLAdff17Z9v8AP8wBN3c/3V7HrzQAZ5B9c9j23f4igBAeRk+h7nPy4J6UAKp4GT2JPB9evv780AAbA59v/QR+H60AITnnPZvX+6OP6/8A16AFJ5PPGf6p/jQ
Am70POH7epyP8aAP7R/8Agyb/AOT6f2sv+zSof/V1eAaAP9K2gAoAa3T/AIEn/oa0Af4LXxqOPi/8VOcf8XI8d/8AqW63/wDr/wAehAPM88ge5P6vn+lACBicZPXH1zu/LpQAuTzz2Y/juPNACE8vz147+hH59P1oAUMSRzwSf0A9f1xQAm7pz09j/dI5/GgBdxOOe/p/uj+p9aAEz79Qc++FH49aAAtkHkfr6j1/z9ewB9BfC79k39qj44+Hbrxf8Ff2aP2gvjB4StdVuNCuvFHwt+DHxG+IPh221y0tbS7utGudb8KeHdV02DVbW0v7G6udOkuVvILe8tZpYliuIWcA9I/4d0/8FBv+jE/2zOpP/Jr3xt9c8/8AFFUAL/w7q/4KDf8ARif7Zn/iL3xt/XPgo0AJ/wAO6f8AgoP/ANGJ/tl9/wDm17429zn/AKEr/PvQAf8ADun/AIKDZz/wwn+2Z/4i98bff/qSvfNAD1/4Jz/8FCHdUT9hL9sxndgir/wy98bMszNhQP8AiiupJAHvQB+zX/BMb/g2D/b8/bI+J/hjVf2lfhZ47/Y//ZosdQsL/wAeeMPivoZ8KfFPxLoUdwJb7wt8L/hhroTxQPEetWq/ZLbxb4v0XSfCHhy3un1wnxJe2Ft4a1MA/wBVP4YfDbwT8G/hx4D+Enw28PWHhP4e/DPwh4d8B+CPDGloyafoHhTwppNromg6Rah2eRorDTLK2txLK7zTmMzTO8sjsQDuqACgAoA/lw/4OV/+CINx/wAFKfgtp/7R/wCzpoVo/wC2f+z74a1C30zRLeGKG6+P3wrtpLnWLv4Wy3RKf8Vp4fvptS1z4W3Nw5trrUdU17wjfeXH4msNX8PgH+VJqml6nomp6joutadfaRrGkX13peraTqlpcWGp6ZqdhcSWl/p2o2N3HFdWV9ZXUUttd2lzFFcW1xFJDNGkiMoAKNABQB+xP7FX/Bev/gqZ+wX4e0fwH8E/2m9c1v4UaEY0034R/GHR9H+LXgXTbGFVWLR/Dx8X2t54r8E6Gm0sujeAvFPhbTxLLNP9n86aSRgD92fA3/B7h+2RptpZR/En9jb9mrxfeRQxJf3XgzxN8T/h/HeSqoEs0FtrOsfEf7GJSCwjaa6EZbhmUAUAd1rf/B8F8frgN/wjf7A/wf0okfIdb+NHjTxAFPqwsPBvhkuM9gyfXvQB8t/FP/g9A/4KbeLoJbL4a/B/9kr4R28kZC6nB4K+InjnxNBKQRvhvPE/xMPhkoPvLHP4PnbcPmlZcoQD8h/2lP8Agvh/wV2/ass7rR/if+298WdG8M3aSW83hT4OyaH8BtCuLGbd5mm6pH8G9I8E3/iOwcMyyQeKNQ1szJtSZ5ERFUA/Im+vr3U7261HUry61DUL+4mu76/vria7vby7uJGlnubq6uHknuLieVmkmmmkeSSRmd2ZiSQD+8D/AIMcf+Sg/wDBR3/sTf2Yf/T38c6AP9C6gAoA/wAW7/gu5/ymG/4KH/8AZy/jj/0K0oA/JigAoA/3nvgF/wAkJ+Cv/ZJfhx/6h2jUAetUAFABQAUAFAH8tX/B4J/yh61T/s5f4H/+g+MKAP8AKcoAKAOqbwJ43TwRF8TX8G+Kl+G9x4quPAkHxBbw9q48ET+N7TSLXxBd+DYvFZs/7Bk8VWug31lrdx4eS/bV4dIvLXUpLNbO4imcA5WgD9PP2NP+Cy3/AAUt/YI07SvDP7NX7V3xC8NfDvR5nksvhN4sbSviZ8KbWG4na5vrPSvAfxD0/wASaH4Yh1GZ5Zb2fwjB4f1GSeWW7jvorx/tFAH7Y+Ev+D0P/gqbocS2/iP4Q/sU+NkCYa71D4Z/F7RtUeQLhXMvh/48afpYBb5pUXRhu6RtCKAPCfjz/wAHcv8AwWB+NHhzVPDXhfxV8CP2dItVVoZta+BXwnuYfE9vZyK6TW2na58WfF3xZn0qSZGx/amlJZa1auqzadqNjMA4AP5yfin8Wvil8cf
Hev8AxQ+M/wARvG/xX+JHim5S78SePPiL4o1rxl4u1yeKFLeB9T8QeIL3UNUuxbW0UNpaRzXLRWlpDDa2yRW8McagHn1ABQB/Rv8A8G1//BJef/gpN+2pp3j74qeGJdQ/ZJ/Zcv8ARPH/AMXH1Gzd9C+IvjFLk3fw9+Cyyyo1vfxeItRs28QeObILNGvgHRNU0q9k0688VaBcTAH+t+qqiqiKqIihURQFVVUYVVUYCqoAAAAAHAoAdQAUAf4N/wC0h/ycR8ev+y0fFL/1ONdoA8XoA+pf2Gv+T2P2Pf8As6X9n3/1bPhKgD/dToAKACgAoAKACgD/AAaP2h/+S/8Axy/7LD8TP/U11ugDx6gD6T/Yz/5PA/ZS/wCzk/gZ/wCrQ8L0Af7stABQBDcW9vd289pdwQ3VrdQy29zbXESTW9xbzI0c0E8MitHNDNGzRyxSKySIzK6lSQQD/KN/4OP/APght4j/AOCb/wAcNW/aT+Afhi5vv2Hfjj4svLzRE0iymkt/2ePH+tzzX918Jtf8pXSz8HX87XVz8KNamMcUmlRzeDNQZtY8P22p+JQD+X+gAoA7b4dfEr4ifCDxpoHxH+E/jvxh8M/iD4VvU1Lwz448BeJNY8I+LNAv0BVbzR/EOg3lhqunXGxmRpLW6iZ43eNyyOykA/pJ/Zq/4O5P+CuXwLt7LSPiN4o+D37U+g2kKWir8a/htb6Z4pitIkCReR4v+Eeo/DW+vr5dq+Zqfiq18U3lyDI1288zrOgB+jmg/wDB8D8f7e0dPFH7A/we1i/MeI7nQfjP418N2iy8fO9jqHg7xVM8fX90NQjbp++oA5nxB/we7/tY3KTDwt+xL+zxo0jK4gbxB46+JPiVI3IOxpk06Twm06q2C6JJblwCA6E7gAfB/wAXf+Dvb/gsX8SBdJ4M8V/s/fAOOfesJ+FXwQ0vWbi1RshTHN8bdZ+MCvMq9ZXhKl8ukcYwqgH4c/tLf8FAP22/2xrye6/ae/an+OHxptZrj7Unhzxn8QNeu/BGnziTzQ2i/D+1u7TwPoCiQCQR6J4f0+IOFYJuUEAHyBQB/tj/APBGP/lEv/wTh/7My/Z8/wDVbaBQB+mNAH+bz/wdsf8ABZD/AIXV8R7j/gmL+zz4q874T/B7xFbaj+1J4j0W93Wnj/4x6HOtxpXwpW4tZPLvPDXwju1W+8U20kk0F78UhFY3VnaX/wANba5vgD+I2gAoA/sP/wCDTr/gkD/w1b+0A37f/wAdvC/2v9nr9mHxVbx/CXRtZs92m/FP9onTUtdV0zUFhmQrf+GfgzHPp/ii+f8Ad29747vPB9jFLf2uj+K9MjAP9OmgAoAKAP5GP+DrL/gkB/w2T+zgP24fgX4X+2ftL/sr+Fb6XxxpOj2Xm6v8XP2eNPkutZ17TBDChm1HxP8ACia41Pxr4ZjQ/aL3w5deONDhh1TVbrwzZ2wB/l3UAFAH7N/8ENv+Crvi3/glB+2ZoHxLvbjVtV/Zz+KR0rwB+014FsPNuTqngR75307x1oumhjFceN/hffXlz4i8OkIt1qemTeJvB0d3YW3i28vIQD/Y28E+NfCXxJ8G+FPiH4C8RaT4u8D+OvDmi+L/AAd4q0G8i1DRPEnhjxHp1vq+ha7pF9AzQ3mm6rpl3bX1lcxsUmt543HDUAdPQB8U/wDBRz9ku0/bq/YY/af/AGTbi7t9PvvjR8KNe8P+FtTvQGsdJ+IGmNbeJ/hvrGoKUcvp2keP9C8NalqCRhZ3s7WdbeWGcxzIAf4fnivwt4i8DeKPEngrxfo994e8WeD9f1jwt4o0DU4Tb6lofiLw/qNzpOt6PqEBJMF9pmpWlzZXcJJMdxBIhJK0AYFABQB+8/7A/wDwch/8FQv+Cfvg7QvhZ4J+J3hf43/BrwxbWun+GPhZ+0Z4f1Hx9pPhPSLRfJi0fwn4r0fxB4T+I+h6Na2ojttK8PJ4yuPC+iRW8EelaDbQiaGcA/U/V/8Ag9m/4KB
zafFHoP7K37HOm6qIyJ73V9P+Net6fJLzh4tMsvi34fuYY+mY31edup83nAAPjP42f8Hb3/BZD4uaZLpXhb4hfBP9nuK4WSO5u/gn8GNJbU5YJchootS+MWq/F+805gpKx3mkzafqMPDw3kcoEgAP6of+DRT9q39pf9rn9nb9sz4gftPfHf4qfHjxhYftB+E9N0nWvih4113xdLoGlTfDu0vZdH8NWurXlxY+GdFe8lluzo2gWunaX9qlluBaCaR3YA/rtoAKACgAoAKACgAoAKACgAoAaUXngjJydrMuSepO0jJPqcmgBNg9X/7+Sf8AxVABsHq//fyT/wCKoANg9X/7+Sf/ABVABsHq/wD38k/+KoAQoMHl+h/5aSf/ABVAH+RZ/wAHUTE/8Fuv2syxLY0L9ncDezPgD9nf4ahQCxJCqAAoBwAMAY4oA/nk+btj8c98/wBDz75oATnoMdB64xyP59PUUAHORwM+2ep3fp1zQAmTnoO3TPUrx+H9KAFBJHGO+OvXOT+H9eKAAZI4x+OQeg6d+/5fXkAOe+P4s9fbr+Y+goAD17Z/HuV6/l/L1oATnvj+Lu3rg559f8aAP7R/+DJv/k+n9rL/ALNKh/8AV1eAaAP9K2gAoAa3T/gSf+hrQB/gt/Gn/kr/AMVOn/JR/HfX/sbdb/z9M0AeZd+3U+uf4v8A69AB6fd7Z655bsfT+uaAD16chh3z1PJ9vfsaAE5y3A9TnPufz9fXnNACjqM4zk9M55H6n1z25oATnjhefr6Hr+HHegBfrjP4+q/mePzx68gCev3f4u5/ug8c/wCR0oACCAc46ds56jnn9T9M5oA/1F/+DMvLf8Eq/iepZ9o/bW+K5Ch3ABPwr+B+SAGABbA3Efe2rnO1cAH9bWwer/8AfyT/AOKoANg9X/7+Sf8AxVABsHq//fyT/wCKoANg9X/7+Sf/ABVABsHq/wD38k/+KoAcFAyQOT1Pc46ZPU4HqTQAtABQAUAFABQB/K3/AMFzP+Daf4T/APBSC58R/tM/swX3hz4GftotYvda+l5bfYPhV+0LcWsWIE+ISabazXXhf4gvGiW1l8TNMs7/APtKNI9O8aaPqqGw8QeHQD/M9/am/Y//AGmf2J/ijqPwa/an+DPjb4L/ABC0/wA2WLSvFmmhNP1/T4pjB/bng7xNYyXvhjxt4clmVooPEfhLWNZ0SeZJIY79popY0APm2gAoAKACgAoAKACgD/QU/wCDJT4NfF3wc37dHxW8XfDDx/4W+GXxK8Nfs9af8OviB4i8I69ong7x7eeHdV+MNxr8HgzxFqVhbaV4obRIdY0l9WbRLq+TT/7TsRdtE11CHAP75KACgD/Fu/4Luf8AKYb/AIKH/wDZy/jj/wBCtKAPyYoAKAP9574Bf8kJ+Cv/AGSX4cf+odo1AHrVABQAUAFABQB/LV/weCf8oetU/wCzl/gf/wCg+MKAP8pygAoA/vo/4NU/2dPgT/wUB/4Jf/8ABRn9hn9o3wwniz4aax8evBfjCaK3khtvEXhDxB42+Gdto+gePvBWqzW92dA8ZeHNQ+Gcd9omrLb3EDvbTadqtnqmiXuqaVegH82v/BXf/giP+1X/AMEmvibqC+NNE1L4l/sy6/rktp8KP2l/DmkTDwnrtvcmSfTfDfjy2t5Lz/hXfxGitlaO58OazcCx1uW0v77wbqviHTLS7ntAD8YaACgAoAKACgD9jP8Agkn/AMEVP2r/APgrJ8UbGx+HehX/AMPP2dPD+uQWfxb/AGlvE2kTnwV4TtIfLuNR0LwjDNLZf8LD+I8lnJGLDwhoV0UsJr3Tr3xfqfhnQ7pNUYA/1vf2Jv2LfgJ/wT+/Zx8A/swfs4+Fv+Eb+HngW0d5729a3uvFPjfxTfrE3iLx/wCOtZgtrT+3fGPie7iS51S/+z21pa28VjomiWOleHdJ0fSLAA+r6ACgAoA/wb/2kP8Ak4j49f8AZaPil/6nGu0AeL0AfUv7DX/
J7H7Hv/Z0v7Pv/q2fCVAH+6nQAUAFABQAUAFAH+DR+0P/AMl/+OX/AGWH4mf+prrdAHj1AH0n+xn/AMngfspf9nJ/Az/1aHhegD/dloAKACgDg/ij8Lvh18bPh54x+E3xc8F+HfiL8NPiBoV94Z8aeCfFmmW+seHvEeh6jH5d1p+pWF0jxSoflmglXZcWl1FBeWk0F3bwzRgH+a//AMFkP+DUT49fsxav4m+PH/BOvRfFn7Rv7Ok8t/rerfBa1D698e/g7alpLmSx0TTogdR+M/gyzz5Ol3eg2918SLC1aC11vw/4hjsNR8aXYB/HZe2V5p15d6fqNpc2GoWFzPZX1jewS2t5ZXlrK8FzaXdtOqT29zbzo8M8EyJLDKjxyKrqQACtQAUAFABQAUAFAG/4W8KeKPHPiHSPCPgrw3r/AIw8WeIL2LTdB8MeFtH1HxB4h1vUZ8iGw0jRdJt7vUtSvZiD5VrZ2008mDsQ4oA/20/+CTvgTxp8MP8AgmT+wR8O/iN4U8QeBfHvgr9kz4GeG/GHgzxZpN7oPifwv4i0n4faHaaroXiDRNSht9R0jWNMu45bTUNNv7eC8srqKW3uYYpo3RQD87/+Din/AILAWH/BLr9kS48O/DDXLQftg/tG2GueD/gZYRSQ3F98PtGSBLTxj8cNSs33pFb+DIL2Ky8GR3qPDrHj/UNIP2LVdE0HxVFagH+RRqGoX+rX97quq3t3qeqand3OoalqWoXM15f6hf3kz3N5e3t5cvJcXd3d3Ekk9zczySTTzSPLK7u7MQCpQB9qf8E9v2G/i1/wUY/a2+Ev7J/wdtni1z4ha0snifxXNZy3ej/Dj4d6QUvPHHxF8QiN4lGl+GNFE09vayXNrJruuTaP4Y0+Y6vrmnwygH+1J+yt+zL8Jf2Nv2efhN+zH8DdAXw58MPg74RsPCfhuzbynv79oTJd6z4k166higTUfE/i3XbrU/E/ijVfJibVPEGrajftHGbjYoB9AUAFABQA10WRWR1V0dWR0cBldWBDKynIZWBIYEEEEg5oA/yZf+DmH/gkG3/BN/8Aa4k+Mfwd8MtYfsg/tTaxrXij4eQ6baldH+FPxLLNqnjv4Nv5KC303S4pbiTxX8NbZ1tYpPB95deHNNjvX8B6xesAfzR0AFAH99P/AAaMf8FkvsF1bf8ABKn9ovxViyv5tX179jjxZrt7hLS/la61vxd8AZ7y4fasWoyHUfGXwyimKY1E+K/CcV1NNqPgrRYgD/QPoAKAP8+//g6T/wCCCHjXVPHHin/gpd+xT8Nr7xRp/iWDUPEP7YPwo8F2H2rW9K8QWiCe8/aB8LeHLNTd6xp2vWolm+Ltjo9tNqOm6xaP8R7i11Cz1zxrq2ggH8C9ABQAUAFABQB/pF/8GRX/ACaJ+2p/2cf4O/8AVZWNAH9ttABQAUAFABQAUAFABQAUAFABQAUAFABQAUAIeh+hoA/yKf8Ag6hz/wAPuv2tMdf7D/Z4Pv8A8m8fDX/9dAH88hb0K/if/r/WgAB68j+HnPHfPOev/wBbNACZPqPzyP4vfpyM0AHIPJ4+uegOf16+9AACcDkZ5+8T6jr+H4/rQAZPrz05PHQdfxB/HPvQAE98jkN3/LoevH/oWKADPvz6Z56qfp0z+H40AGT6j+LPPHJ47/l7dKAP7Rv+DJv/AJPp/ay/7NKh/wDV1eAaAP8AStoAKAGt0/4En/oa0Af4LXxpOPi/8VOf+aj+O/8A1Ldb/wAj3xQB5nnkcjGTnnnvjv05H14oAM+/p0OR1JPJ56DmgAz156hu59cj9D9cYxQAZPzYPXp+ucc/yzzigBc9CSO+efw4HpkHrn16mgBM9OenXnnoc559SOvfr2oAM+pX3wT7e/8Ak/U0AGevPqBzyeOPr3z/ALVAAxOOvJGePqM/h6e2cmgD/UW/4Myv+UVnxQ/7PV+K/wD6qv4H0Af1u0AFABQAUAFABQAUAFABQAUAFABQB4V+0J+zD+z
x+1j4Bu/hd+0r8F/hx8b/AAFeea//AAjnxH8K6V4ltdPu5Y/K/tTQri/t5L/w5rkKY+ya9oF3pus2TqktnfwSorgA/lQ/az/4Mwf2Fvipc6jr/wCyh8cPi9+ynrN5NNPD4U1+2t/j58LbFCzSRWWl6Z4i1jwn8SrJWLGBrzVPin4k8mIRSJYyPFIlyAfhT8XP+DL7/gpb4QvbuX4T/GX9lL4xaFHu+w+f4u8f/DnxbdBScfadB134fal4asy67doj8d3oDl1dkVVkcA+MNZ/4NS/+C3ulySpY/su+DvEaxuypLo37Rn7PkEc4UkCSIeIfiRoMoV/vKJ44XAPzojZFAFjSP+DUX/gtzqW37b+zR4G8P7sZ/tf9or4DT7M9d39g+P8AW8477N/tmgD6I+HP/Bm9/wAFaPGM6f8ACX+If2UPhLaBx57+L/i54p1698rI3NaWvw5+GnjW2nmwTsiudRsY2IIeePgkA/T34Af8GQRXUbbUP2pv26/N0mMxfbPCXwA+Fn2fUbtScz/ZviJ8RdaurbTygBji834X6kJS/nOYvL8mUA/os/Y5/wCDcT/gkp+xdqmmeK/CX7N9r8ZviJpPkPY/EP8AaW1X/hcerW1zbMJLfUrDwnqljp/wq0bWbecC5ttb0L4eaXq9pcBHtb2ERRLGAfuZDDFbxRQQRRwQQRpDDDCixxQxRqEjiijQBI440AREQBVUBVAAAoAkoAKAP4t/27v+DQb/AIbY/bD/AGiP2sP+Hhf/AArT/hffxM1z4i/8ID/wyb/wmf8Awin9tGE/2P8A8JT/AMNL+FP7d+zeV/yEP+Ec0fzt3/HjFjkA+S/+IGP/AKyif+aT/wD5W9AB/wAQMf8A1lE/80n/APyt6AP7z/APhb/hB/AngrwV9u/tT/hD/CXhzwt/af2b7F/aP/CP6PZ6T9u+x/aLv7J9r+yfaPs32u68jzPK+0TbPMYA62gAoAKACgAoA/lq/wCDwT/lD1qn/Zy/wP8A/QfGFAH+U5QAUAf3t/8ABjf4okg8V/8ABSHwU7s0Wp+Hv2W/FEEZOVik0LUvj1pN06DPDTr4is1lIHzC3hBPyjIB/fn448C+Cfib4S1/wD8SPB/hfx/4F8V6dNo/ijwZ410DSvFHhXxHpNzj7Rpmu+H9btb7SdWsJtqmW0v7SeByqlkJUEAH8l37c3/BnV+wj+0Bqes+Nv2S/iL42/Yy8Z6rcz38vhK200fFz4HPcTM080em+DNd1vQPGfhQXdwzqq6P8RLjw5o1u6RaR4OitraKzYA/nM+Mf/Bm9/wVX8A3l+/wy8T/ALMfx30ZJHOlv4a+Jmt+CPEl3bg4Q6jo3xI8HeGtD027bGWt7Xxhq9sgK4v3O4KAfK7f8Gqv/BcQXotR+yX4beA9dSX9o/8AZr+xDnHMbfFhdR568aeeOvPBAPp34P8A/BnJ/wAFYPH97ZH4ka1+zL8CdIeZf7Tl8W/FPVfGWvWtrn94+n6R8MfCHi/SNRuwOY7a68UaTbyc77+E4yAf0XfsN/8ABnD+w78CNT0jxr+198T/ABt+2P4t0y5t7+LwVFprfB34IpPEyzpBq/hrRNd1/wAdeLhaXKIA178QNH0DV7dZYNY8IXFtcyWiAH9bngH4feA/hV4O8P8Aw8+GPgvwp8O/APhPTotI8L+CfA/h/SvCvhTw7pcBYw6donh/Q7Sx0rS7ONmdlt7K1hiDu77dzsSAdfQAUAFABQB/Bv8AEj/gyS/4WD8RPHvj7/h5p/ZH/Cb+NPFPi/8Asr/hjL7f/Zn/AAkuuX2tf2f9u/4ausvtv2L7b9m+1/Y7T7T5fnfZoN/lKAcX/wAQMf8A1lE/80n/APyt6APU/gb/AMGV3/Cl/jZ8HvjF/wAPKv8AhJP+FT/FP4ffEv8A4R3/AIY4/sf+3/8AhBPFukeKf7F/tf8A4ao1X+yv7V/sr7D/AGl/ZmpfYfP+1fYLzyvs8gB/dTQAUAFABQAUAFAH8GnxD/4
Mj/8AhPfH/jnxz/w81/sr/hNPGPibxZ/Zf/DGP27+zf8AhI9avdY+wfbv+Gr7P7Z9j+2fZ/tX2S1+0eX532aDf5SgHHf8QMf/AFlE/wDNJ/8A8regD0r4M/8ABlN/wqP4wfCn4r/8PLf+Eg/4Vj8SvAvxD/sH/hjb+yf7c/4QvxRpfiT+yP7U/wCGqtS/s3+0v7N+xf2h/Z2ofY/P+0/Yrry/IkAP7sqACgAoAKACgD8p/wBu3/gij/wTc/4KKG/1n9on9nPw2nxNvVJHxv8Ahk7fDP4wifbsjuNV8XeGo7dPGv2eMulpZfETTfGGlWm9nt9Pjm2yKAfywftIf8GQ0L3up6r+yJ+3E9vpzmQ6P4F/aO+HIu7y3HzNGNR+KvwzvLSK63ZWNzb/AActCm0yjzS4iQA/JH4hf8Gfv/BYbwZJKnhvR/2bPi2iOVSb4f8AxwXTFmXdgSIvxU8KfDSRQR82JURgOME0AeFS/wDBqz/wXGju1t1/ZF8PzQnOb+P9pH9mUWq4PVkm+LsV8c9Rts2OM5wcAgHc6N/waWf8Fp9UaNb74K/Cbw4HKhn1n9oH4YTrCGIBaT/hHtZ15yFzlvKWViAdoY4BAPsn4Y/8GVP/AAUR8RfZ7j4qftGfsk/DOym2mW10DWPip8R/ENoD98XFgPhv4P0F5F52ra+KrmN8czJQB+yH7M//AAZVfsX+BP7P1X9qb9pf43/tB6vb+VNceH/h9pfh74FeArqQ4aay1CFn+JPji+tUyYo7vSvGvhe6m2i4aK33/ZkAP6cP2P8A/gnH+w9+wR4f/sD9kr9mv4afB6Sa0FjqfizStJk1v4k+IbUFW8jxP8UfFNxrnxE8SW4kXzY7PWfE17ZW0jyG0t4A7LQB9sUAfx1/8FHf+DV74xf8FLf2tfiX+1d8Zv8AgqP/AGfqXi+7j0rwR4Ftf2OZ9U0L4V/DLRZLiPwb8OPDtzN+1fYi4stCs7ia51TVI9N0xvE3inUfEHiy70601DXruFQD4X/4gY/+son/AJpP/wDlb0AH/EDH/wBZRP8AzSf/APK3oA/og/4Im/8ABCn4Rf8ABG/wz8Wbyx+J3/DQ/wAcvi/qVlZ698ab/wCHEHwzm0n4b6Mlvc6N8OPDnhb/AITb4iy6Vp7a/wDbvEXibUo/FLN4qvx4fW+sIIvCmkbAD93KACgAoAKACgD4o/4KGfsK/CH/AIKPfsl/FX9k74zRfZdC8f6ULjwv4xttPh1HXPhn8RNH8y78FfEfw5DNPaGTU/DOrFZLrT0v9Pj8RaDc614V1C8i0nXdQDgH8bX/ABAx/wDWUT/zSf8A/K3oAP8AiBj/AOson/mk/wD+VvQBv+Ff+DI3xJ4G8UeHPG3g3/grDqvhfxf4P17SPFHhXxLoX7Gk+m634e8R6BqFvquia5o+o2v7Xcd1YappWp2ltf2F7byJPa3UEU8TrIisAD+6H4XaJ4+8NfDfwL4e+KnjrSvid8SdD8KaHpPjn4jaJ4MHw70rx14o0/T4LXWPFln4EXxH4uj8JR6/exS6m2gQeJdYtdNluZLa0u2tkiRADvKACgD+aX/go7/wa0/8E7P269S8RfEf4a6fqX7G3x68Q3t5q+p+O/g3pFnqHw78Ta1fzPcXmpeNvglfX2leGry4uZ5ri8u7zwHrHw31fU9Sne+1vVdVkLo4B/LJ8d/+DML/AIKP+A9Rlk+Bnxi/Zr+P3hsyvHbPea/4p+EfjQqu5lnvvDfiPw/rnhW2ikXaoFp8RdSmWVirQiJfPYA+KtX/AODUz/gt9pt6lrZ/st+D9fgZiralpP7Rn7PUNlGB/G6a78StF1Eq3YJYO/qg5wAej+Bf+DRT/gsv4uvY7XxB8P8A4EfC6B9u7UvHXx58MahZRbjz5ifDO1+IuonZ1fyrCXI+5vPFAH6i/s+f8GQ3xXv57O+/ar/bh+HvhS2ilR9Q8Mfs+/DrxJ8QJ9Qh3DzYLPx18Rrr4aR6PLsyUu5vh5rqBsB
rMg7gAf2Of8Ezf+CWH7Lv/BKP4QeJfg/+zGvxCv7Hxz4js/F/jzxZ8TfFkfijxV4s8SWOkwaLbahdJpmleHvDGkRQWEAijsvDnhvR7ZtzSXKXM2JQAfpFQAUAFABQAUAFABQAUAFABQAUAFABQAUAFACHofoaAP8AIp/4Oof+U3X7WnJH/Ej/AGeOn/ZvHw19xQB/PIc9vfoM9/c9f580AHPqc4XqPX2z+dACZPqc/Tv83vjt+PFAC8569+/rtz6/j9fzoATJwOSeD2yev19/60AAzjjPbtnsvvQAEn17N2/+v+Xrx60AKTz17j+a/wCP60AJk8cn+Ptz19P8+negD+0b/gyb/wCT6f2sv+zSof8A1dXgGgD/AEraACgBrdP+BJ/6GKAP8Fv40Z/4XB8VMf8ARSPHfv8A8zdrX+TQB5lzkderduM5bv6+1AByR17jqMc7vr/n86AFz15/vn8j1oAQk/Pye34fTn8z+NAC5ORknn29u/PHf8c9qAEGfl+g/wDQT780AAJ7569wB6f4/kSe3IAc/N/wLPHsPf8ALrkUABzg5PYdsdT9fzoA/wBRb/gzK/5RWfFD/s9X4r/+qr+B9AH9btABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/LV/weCf8oetU/7OX+B//oPjCgD/ACnKACgD+6L/AIMfP+S7ft/f9kl+CH/qY+O6AP8ARWoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPQ/Q0Af5FP/B1D/ym6/a0ycf8SP8AZ47Z/wCbePhrQB/PESD1K9+xPf1/z696ADj26Dkgnpn8fr6dOaAE4PGQevb/AHj+GM9O/H4AC5Ge2evTtt/ljnHrx70AAxgf4HJy2e2e4wB9TQAcdD39iT0Hp7YJ/wDrZIAce3RucEdcZ/Q8eucccZAAkZznv9f7v1z09e59KAD5fy3dj3Pf8On9DQB+in/BOH/gqB+05/wS1+Jfjz4r/svP8O08V/EbwGnw68RH4i+D5fGOm/8ACOp4j0rxSPsFnHrGim1vv7T0i03XLTzL9n8yIwbykyAH7Df8RgX/AAV5/wCe/wCy9/4Yy5/+bugBf+IwL/grz/z3/Ze/8MZc/wDzd0AA/wCDwP8A4K8ghvP/AGXvlIb/AJIZdgHDAjJHjwEA+oIPdSOoAP5fvEevX3inxBrviXVTb/2l4g1nVNc1D7NCYbf7dq9/c6ld+RAGbyYftN1N5UQZhHFtTc2NxAMbvnvk/T+LOT+f5e/AAgxwMgnjHHPXPXt/k98UAHryOjfqT378g4HfrQAHaS3v7Z6DnB+vOTjP60ALxkcjgn1H1yT3Hr3oATK8dP8Avk+h6+vJ7f8A16ADj1HX3GOR1zk9QB+J9KADjn8ex7gE49PUZ65I96AA4wcY/Ig8H1PXnGe/egD/AFF/+DMr/lFZ8UP+z1fiv/6qv4H0Af1u0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8tX/B4J/yh61T/s5f4H/+g+MKAP8AKcoAKAP7ov8Agx8/5Lt+39/2SX4If+pj47oA/wBFagAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAQ9D9DQB/kU/8HUP/Kbr9rTk/wDID/Z46df+TePhr70Afzxt9SO
v8/cj16/4jIAde56L69/x6n/9WSaAE/E9Dz/33nvz+f8AOgBe/U5z7/3OvXr+p9aAE7ZyenX/AIF9fpn+dAByR1PbPvlV65I/X+Z5AD8T/Gefpjpn6+/86AFPX3z9O6e+aAE9OT0Y9SfUdz/Ln1POaAD05P8AD79hjPPHOT+PGaADnJ5OcHsfUdOc/wBPegAx82cn73+e/wCHrjnGKAFPXqf8vzzQAmODyeo9fT6/17DHUUALn5hz3PH4t7/0/GgBB2OSeR69d3Xr1/P8yKAF9evR/wD0I579f85oAQ/xcng+/fPv/n3zQAo6jk9SR+X17dvX9SAJjpyeg9ePlPv/AJ6DvQAD6k88/mvuc/8A1z+IAc+pyN38h3ye/P19KAAjAPJ6D8ifXJoA/wBRf/gzK/5RWfFD/s9X4r/+qr+B9AH9btABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/LV/weCf8oetU/7OX+B//oPjCgD/ACnKACgD+6L/AIMfP+S7ft/f9kl+CH/qY+O6AP8ARWoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA8gj1oA/yZP+Ds/wCHfiPwZ/wWa+MvibWbC5ttJ+LXwq+Afj7wfdTRFYdT0PT/AIbaX8Nr+4tpCNsiWvinwFr+nzAHcktuQ+AylgD+ac/7v6A/4/5P1IAEx14/u8Hn1/z/ADxzgAMdsHp+P8ftnn6UAL36d/Qf3OnX9OnvQAg6Dg/5bPPBz+X4HnAAYyOmenbP8K/U/ofw60AGPb+9wPfH19f8M8ZAA8547/h1X3Pp6+vPoAGOnB/j6d+39e4Ht6EAOfTpjtyDgfT8f129aAFI5OB1HXAwec569+vXPqPUAMc5x/ETyP6+nf8AXJPFAAfp+ffLZ/zkj8eoAExweO47e3+c59TnGTgAX+IH3b+bf56/40AIOg47jk4z9716/p3oAX14PRh78k/5/lnnAAh/i45/D3+uPc859uoAF5yDg9Sew6gdfr74Pt2oATHTjqB2Hoffn1659cY5AD8D1z9eVPoPTPPoc85wAGOvH97t04H+ep/HrQAYJzxycAYGMknj8z7c9wvcA/1VP+DPn4d+IvBX/BIz/hI9csbmzsPi1+0/8aPiB4RlnjMaal4a0/TfAnw1bUbfcAXt5PEngLxHaJJ0drKQoSuCQD+qCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/lq/4PBP+UPWqf9nL/A//ANB8YUAf5TlABQB/dF/wY+f8l2/b+/7JL8EP/Ux8d0Af6K1ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfiV/wAFrf8Agix8IP8Agr58GPD+kaj4hj+FP7RHwmXWrz4K/GWHSBq9tax60kEus/D74g6TBJbXuv8Aw98SXdlY3haxuote8Ia9bQeIvD73MMuv+HvEgB/n1fFL/g1b/wCC03w78S3uh6B+zX4Y+L2k29xLDZ+M/hj8bvhJJ4d1eKNmUXdpaePPFPgXxZZxSbcrDrXhvTrpchWiOC1AHmn/ABDN/wDBb3/oxjxD/wCHl/Z2/wDntd+p9TzQAf8AEM3/AMFvf+jGPEP/AIeX9nXr6/8AJWu3b0wPSgA/4hm/+C3v/RjHiH/w8v7Ov5/8laz
nPP15oAP+IZv/AILe/wDRjHiH/wAPL+zt1z1/5K117fTigA/4hm/+C33/AEYx4h/8PL+zr+ufi1+P1JPegD8a/jH8IPiN8Afir8Q/gl8XfDkvg/4n/Crxfr3gPx94Wn1DStVm8P8AizwzfS6brWkyanoV/qWjXz2N9FJC13pmoXllMV8y3uZoislAHmvPXnP/AOr2B6Z7dz3IyAKcn179sc9Rj+fX64NACZIH8j/P16nPXr1G7OKAA5I75/HPUc//AFwO56ZxQAvt7/pnp6dOevTtQAnQ/wCT3P8ATj1xx1wKAA55P5Hn0PT6nH4kdeKADHPsc8fnj+Y4I9PQ4ADnpyc4znv8xzk8jp79OMHsAffv7Ff/AAS7/bs/4KIab8QtX/Y5+Amo/GfTvhXe+G9O8e3Fj42+G3hIeH7zxfb6xd+HYnj8e+MPC8t82o22g6rIj6Yl6lsLQrdtA0sAlAPuH/iGc/4Lff8ARjHiH/w8n7Ov/wA9mgA/4hm/+C3vf9hfxCfr8Zf2dufr/wAXa/H680AH/EM5/wAFvv8AoxjxD/4eT9nX/wCezQAf8Qzf/Bb3/oxjxCf+6y/s6/8Az2f85PqaAD/iGb/4Le/9GMeIf/Dy/s7f/Pa7dR780AfpX+wR/wAGff7dHxa+I/h/Vf2577w1+y18FNPvYLzxTo3h/wAaeE/iT8bfFWnxSLLJo3hGz8H3PiPwL4Ul1ONJbJ/FPinxBeTeH3ljv7bwd4gkiFo4B/pO/Bn4PfDj9n34U/D34JfCHwppvgj4Y/CzwjofgfwP4V0lXFlovhzw9Yx2GnWgmmaS7vrpo4zcajqmoTXOp6tqM93qmpXVzf3lxPIAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8tX/B4J/wAoetU/7OX+B/8A6D4woA/ynKACgD+6L/gx8/5Lt+39/wBkl+CH/qY+O6AP9FagAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBCoPUA/UA/wA6AE2L/dX/AL5FABsX+6v/AHyKADYv91f++RQAbF/ur/3yKADYn91f++RQB/ij/wDBaQf8bZ/+CjP/AGeT8f8A1/6H3VPT/P40AfmN7/r36r6ZHb37deaAE9BgcBv7349vz6+3NAB/9bPXsB6jt1GSP9qgAOTn3HX5s4znsMH8M98HHFAC85/HP8XX67fz5xjt3oADn/O7+9n06/nnjjFACc4/EdN3p/u5/wAjOe4AvcHjqfX1bjOMf1oABnj8P73r7jucd+oHagD/AEM/+DHgA/Db/govkA/8Vt+zJ1H/AFLnxm9f59+tAH932xf7q/8AfIoANi/3V/75FABsX+6v/fIoANi/3V/75FABsX+6v/fIoAcAB0GPpQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB+WP/AAWG/wCCasv/AAVd/Y6uv2T4vjNH8CHufiZ4H+In/CeSfD1viaqDwaNYB0j/AIRhfG3gAsdR/tbi/wD+EgX7J5H/AB53Pm/uwD+Ub/iBmvv+kndp/wCIaTf/AEVFAB/xAzX3/STu0/8AENJv/oqKAP3W/wCCHP8AwQKuP+CNPjz9oDxrN+1ZD+0WPjn4S8D+Fl02L4IP8JT4YPg3Wde1Y3xvH+LnxK/tn+0f7b8gWwtdK+yfZvN+0XPneXEAf0Y0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUA
FABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAf4ov8AwWk/5Sz/APBRrP8A0eT8f/8A1PdU/pn/ABoA/MY9fTnv/wAB75Pt/XvkATg9x/F+H6jr3yfpgcgAUfUdvqcYOP5YwfqTzQAHHJyffkZHPTOfU/8A1+xADj15znqM56Z69fXtj+HNAB+P5/73+POcYzjoOKADjnnuOhHp9evHHOOmckGgA79ecn69W/n3yMcn2AAAY459OMjHX6k/r168cUAf6Gn/AAY7/wDJN/8Agov/ANjr+zJ/6jfxmoA/u/oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP8AFF/4LSf8pZ/+CjX/AGeT8f8A/wBT3VKAPzFJ5J9+/wBU70AGQe3Zsc+uc8Y/LP60AGf129+eAPbn8+DycA0AGeSe+PUdMj2wP1Pr2oAM8/8AAj375+np+GOPvUABP9T1/wBv/PJ/xoAM8Hp1Hf29+e3f8ehoAUfeHrk/zb/PX0oAQHgDscdSP73p1NAH+hr/AMGO/wDyTf8A4KL/APY6/syf+o38Zs0Af3f0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH+KL/wWj/5S0f8FGucf8Zk/H/r/wBj7qn/AOv9fqAfmOfbHXp+K+3uT+INADefUfxenb8PXk9c9aAF5yeR2z05yB7c5/CgBeeeRj6j1+n4d+aADnPUYz6j8unX8f8AGgAOfUfp/e+nbpnnmgBMnnkZyOcj0+nft/8AW5AF5yPqf/Zuen659etACc8HIPTPQ9W+n9RyPagD/Q0/4Md/+Sb/APBRf/sdf2ZP/Ub+M1AH939ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/ii/8ABaT/AJSz/wDBRr/s8j4//wDqe6pQB+Yx9D0z9B1Tt+P86ADPvzh/55/+vQAA+uCTjr7qO3Xn2/HHWgBCcgjjvnkdcjnOePofpQAuQfrn27N09f8AH6mgBD156d8+z/8A16AAkHP1/mp/qf1J9aAFB5x7k/q9ACA56nk7fTsx7UAf6Gv/AAY7/wDJN/8Agov/ANjr+zJ/6jfxmoA/u/oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA
KACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa2drbeWwSo9Tjj9aAP8bP/g4Y+B3jL4Ef8FhP23tK8XaZcWdv8R/i5qfxz8H6jIjiy8QeDPjNbW/jXS9U0q4Py3dtZ6hfax4bvpYiyW+uaBq+nE+fZyKAD8WSc56fjn/Z64P8vb/aoATjPbv/AHvQ+v69/TmgA4yDx2/vdgPw+mT9aADj5unP+96jr/8AWzz7ZoAON3bOf9rPX8s/pQAp79Oh65/vd8H1/HPtzQAnGD93qP73v+P+TntQA7+IHHr655J98eucn1x2oAb6dP8Ax7+91H/1+/4UAf6UP/BlL8DvGfgz9kD9rD4767plzpvhb43/ABu8HeFPA812jRHXbL4K+FNYtvEOu6crAC50hfEfj+58OpfRFon1fw/rViT5thKoAP7VKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD8dP+Cuv/AARY/Zj/AOCu3w40LSvifd6l8MPjf8PbW/t/hP8AtA+ENMsdS8S+GrLUpRdX3hLxXod7LZ2nj74e3t8F1NvDV9qOl6hpGq+fqPhbxD4fn1PXBqoB/F34x/4Mof8AgovZ67eweAP2m/2KPEvhpJpV0/VvF/iP47eB9durcOPJlvPD2j/BD4i2FhPIg3SwQeJ9Rjhb5UuZx89AHLf8QVf/AAVMz/yXz9gHnOc/FP8AaKPU5/6NW79/8mgA/wCIKv8A4KmZ/wCS+fsA9R/zVP8AaJ/X/jFbt29OvWgA/wCIKv8A4Kmc/wDF/f2Avr/wtP8AaJz9M/8ADK3T2/WgA/4gq/8AgqZ/0Xz9gH8Pil+0T+f/ACat19T3HGKAA/8ABlX/AMFTP+i+fsA/+HT/AGifXP8A0at17n1PoKAD/iCr/wCCpnP/ABf39gLr/wBFT/aJ/wDoVu3bnjjrigA/4gq/+Cpn/RfP2AepOf8Ahaf7ROe//Vqvv+poA+y/2Q/+DKD4oRePNG1r9uf9qz4aQ/DzSr22vNW8Dfsw2/jHxD4k8X28UqPNo6/EL4meEfAtt4MguFDRy6raeBfE995RZLWKxuHS9twD+974LfBj4X/s7/CnwF8EPgr4K0T4d/Cv4Y+G9P8ACXgfwZ4egeHS9C0PTUKwwI80k15fXt1M82oavrOp3N5rGu6vd32s6zfX2qX13dTAHp9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFA
BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUA
FABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH/2Q==",
"attach_logo": "logo-2013-color-small.png,data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAEJGlDQ1BJQ0MgUHJvZmlsZQAAOBGFVd9v21QUPolvUqQWPyBYR4eKxa9VU1u5GxqtxgZJk6XtShal6dgqJOQ6N4mpGwfb6baqT3uBNwb8AUDZAw9IPCENBmJ72fbAtElThyqqSUh76MQPISbtBVXhu3ZiJ1PEXPX6yznfOec7517bRD1fabWaGVWIlquunc8klZOnFpSeTYrSs9RLA9Sr6U4tkcvNEi7BFffO6+EdigjL7ZHu/k72I796i9zRiSJPwG4VHX0Z+AxRzNRrtksUvwf7+Gm3BtzzHPDTNgQCqwKXfZwSeNHHJz1OIT8JjtAq6xWtCLwGPLzYZi+3YV8DGMiT4VVuG7oiZpGzrZJhcs/hL49xtzH/Dy6bdfTsXYNY+5yluWO4D4neK/ZUvok/17X0HPBLsF+vuUlhfwX4j/rSfAJ4H1H0qZJ9dN7nR19frRTeBt4Fe9FwpwtN+2p1MXscGLHR9SXrmMgjONd1ZxKzpBeA71b4tNhj6JGoyFNp4GHgwUp9qplfmnFW5oTdy7NamcwCI49kv6fN5IAHgD+0rbyoBc3SOjczohbyS1drbq6pQdqumllRC/0ymTtej8gpbbuVwpQfyw66dqEZyxZKxtHpJn+tZnpnEdrYBbueF9qQn93S7HQGGHnYP7w6L+YGHNtd1FJitqPAR+hERCNOFi1i1alKO6RQnjKUxL1GNjwlMsiEhcPLYTEiT9ISbN15OY/jx4SMshe9LaJRpTvHr3C/ybFYP1PZAfwfYrPsMBtnE6SwN9ib7AhLwTrBDgUKcm06FSrTfSj187xPdVQWOk5Q8vxAfSiIUc7Z7xr6zY/+hpqwSyv0I0/QMTRb7RMgBxNodTfSPqdraz/sDjzKBrv4zu2+a2t0/HHzjd2Lbcc2sG7GtsL42K+xLfxtUgI7YHqKlqHK8HbCCXgjHT1cAdMlDetv4FnQ2lLasaOl6vmB0CMmwT/IPszSueHQqv6i/qluqF+oF9TfO2qEGTumJH0qfSv9KH0nfS/9TIp0Wboi/SRdlb6RLgU5u++9nyXYe69fYRPdil1o1WufNSdTTsp75BfllPy8/LI8G7AUuV8ek6fkvfDsCfbNDP0dvRh0CrNqTbV7LfEEGDQPJQadBtfGVMWEq3QWWdufk6ZSNsjG2PQjp3ZcnOWWing6noonSInvi0/Ex+IzAreevPhe+CawpgP1/pMTMDo64G0sTCXIM+KdOnFWRfQKdJvQzV1+Bt8OokmrdtY2yhVX2a+qrykJfMq4Ml3VR4cVzTQVz+UoNne4vcKLoyS+gyKO6EHe+75Fdt0Mbe5bRIf/wjvrVmhbqBN97RD1vxrahvBOfOYzoosH9bq94uejSOQGkVM6sN/7HelL4t10t9F4gPdVzydEOx83Gv+uNxo7XyL/FtFl8z9ZAHF4bBsrEwAAAAlwSFlzAAAZxQAAGcUB/Hz7SgAAJcZJREFUeAHtXQmsHVd5njMzd3m7n5c4jQOJTUiIbRwggCJKwG4hoJZNVNdFqKUKSEArVKVqGrWU8PwUQCgEFQmQSKUSKUiI+qGItYIINRa0AaUssbEdEnAWhSTEjp+f33qXmTn9vjNzX952Z+4699z7zrHn3XtnOef/v///zn9m5ixCSmmZFI+AsCwBlBRQr/7ZzVf6QfBeaYs3YOdu7B7SFUIBwSHfghT2EyKQDzq2fd+vbrjnSe5dqRN/m7QxAsIQZGNgqnuFJcCDkAL7H7z5fbYV3Aam7IKHlUGZIs7zq+dq+ulA1jxkzYIUzwSWfefJN9zzDcq6UjdNZe+6WHbXJdBdgMkjqh5+5U//5oO2kJ8HWbZix1nUzLPwME938UMZxSxlpuwg+Oepi5I70k17HboooIkgMeAfOnbIfeDgA951
P/vga6T0vy6lyAhLLuKSLByvZ4KvampJSGxZZXwMIiZWhHDef/yGr/6yqmMMDJv6kLuptY9RXjU/DkoVIUCOm8GGLXC0c2yqqPsRdUcSk4FGh6J7JAmGZEGOReiyAy3DmyHiL1kBmKZWbWOZJlYNbApTBYXNK/73Q5fBwV4rhFhC7WszctS4RP/djHrQIdTFuv410I1CV3XVX4H0JTQEqYV5ITyQsSp0onF4FqIJKuHepQfEp04SzWroIq2tlVA3MCTU1fxdj4AhyHpMwj1T4QdixhCcCvceVgCnYju+txN0ULoIyw11gzqRrr2tWGekNwRJwFUKp/dJsZGOeOLQt7ptpG+T+wxBkoDz9H+Sm6RCzeN9rFpNnRs8YAiSCJh50JcIUR+fYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AI0jqGJoc+RsAQpI+Na1RrHQFDkNYxNDn0MQKGIH1sXKNa6wgYgrSOocmhjxEwBOlj4xrVWkfAEKR1DE0OfYyAIUgfG9eo1joChiCtY2hy6GMEDEH62LhGtdYRMARpHUOTQx8jYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AIUgNDM5daDWA22e7emdMGE8qmOe3nkR0H1YRxLhDyghVegb1cTScuJRyOu7SlY43OcCdFoC45t+McwOWEiykllIQJtLsFU0NK9kAEwQSZU8KxjmDaT2VCAtv57dTBHRsbcOO9q0Cnq6lt1d7O/6BocdtaCYRjK212QFcuEpTGpmx45IgQU4cxZeWk9v6ncQSBi01ZtlWQ/vLkyhNrTdy530eto2rlqIeFW9xvBRJBhLPaBvCi+mZ45zTqmCianFZe2AFRl/PmIlj1xFeeJTjHMFdCsKWdVStkWVVdOyDiuiyjyLEMCYmC6eUDknPdyRrs0HQBHeVXgEfK5++/dCjjVG6Eax6AWbcHgcyg2qGbdjT50rHHrPPF+zOv2/PJ7O6b8pi8mqsHwLfgXDVsCUcNLBH4ll32LHepIuwyAMZyA9SkQ+LaQKYSZOVSZSAoB1lLBlgIS9XTGxbIMIHWKpeVs+WSf78seY/j9LwSfcMr2rcTELAlVwF8LwDME0Oy+JPjf/3DBcpDSHUkiYYR5EV3euH+re/MWPJWeN0BGpEgRljW9NF2mZO1s4fytgQLtmOVM1w1BxGBItRO0UF4IIlUKVm5i4tW9gJk5y4uO9C+pHxcCu/C0rZgtjwmvQBQMTI0ksS7VURJ0quRLJPOjUCwA6u4ZOdPvPxr77wLYH2Xl5E9upFEswhCcjBJeeFHWz+G9synAJoDv5rFPp8VcXi8838DyxGDwfng55n9A/+Ye/lWrltGgvB+PdELwzAD0aVdtjKzc9bg82AII1BbSKICQIC8zy3sDObLoxaiSFgHw7/qT0L6chrOuQTuotnYVvpuIMUq1ICN5eCkUUDqQ/ZPPPpX3/6SiiTYCZka0WODstq3S68IEt1zMHLAH0EOqwK4ZoBmBm6pZAV0q5BuHxSrc1IWCtvrLFf5H32o3sJJCFbMWVEZHbKK3pw1cJZNH17fivXV9WCaP43IAXJgdQbcK5EYjB71RxCqAn1cXJKJuL8agLb/irRGYcxaCSzleXxiGW3xKUSSpxhJ1D2JRisHd7gl3wjKqMVwQ857DjSib2XkwNULAC8HEFX9FtUrBLjjGxnBQsAIsiKhbcUT16SouYOL/Zwsj2WlN4hqvq4AtCan5Z8kGO5oAtxvDPrzpbGQHDxcPzGqmYWMV8Sigox56WyUVoms6pocWL2Ab44Q9q3Xfe1tQ7JwFC0FfZ5u6UOQKQWYxRtyAIh7DjarEDmqzsmqM80tKo62bClBEVtYds6qDKt8oA/VaCbhQuVb/nxlGM/29LFdM8rgmoiYaB1Ys6DnKxdE/kaV1dSpZiFqUpLal+kBMu+8T4fG59MqMAEPjeAC
Yf0dEqO2DnofQc3MsOFIPwcnsKMo2LzMuPeQZT8XVidhc6X5zLp/JaMiLO+D+QN4KwPbI53eh4CmWqNdF1APgrBePBISBLXkdoWKqiu7jk/bBGA7H2DjZli5RHP58tqAD9PwvKC/Umjtqu2PHGm8SdshPPQBOnpywfcc1FXdA7B26ZdEXVrUpno5b7D7BRaQHbpEtaEMba/TUyx9CBJZHAL1WeyIFIvuH9rh2OoGux0ZaZbHi0zRRzDtCKLbredKtq78ro8JjSSdREA/gnRS2wbzJiHwogBvtMImcf+0a0IgqoTvN70aNHPs6YYgG8Gz/KjJtkascjCC3x68qK8cCS/10f7HA7a+0moja7a0zxCkJnzsAzGI3pEX/d1B2V/A24zEvlg189LtAB+hSg4GUW9TqpFENyl1kMcQZCMrwH94T122cmIsOBe8yb9Q5FtL1rVNA6bTkyfIgodHRQRGdocxIWQjH4j2NW3vmDz745BqZjGKDFt/4p1ZenuwUP6tyNjoGyHZB4ZeVc9GMAiyevLEqpr5RnmHD+zUTh5I3HgnFHaXxKnNJNWsQiC0ZFkEwRJf8ZsUj4BenRXjZU33KKMImuhlkROjwWzw98XfzJZy14790B3KXGH5GIBR/5AUjttjewaOGaDjLL8255l4UajeMJNLzSVwW1ag1iw6vCg5ms+qOQF67SpDkDiLKZL4VlEMi8uD57zbS+WZA8FVQz90x3PPWA5GQ9X1cgM9scAIabuuEGMgC0bPNUcQsIqvUxmQ3IhhzDlOgxePqeaUXMT1C2hY+bgMd+kmJSFgCJKEUBRJSJJtcsb/YOl/Zt9Weal7xh3PnBd5p5TQTkFbP7CtIF8U7rPfzL70nictZ3GrsDIgV2AjniQVr45zjCD7cdlWRXpyUPrBzYhFl+FYEUdImJqJHRxRiA9iVEAML2zv1cuqmtlumgOGIPWYGh7G5laJI1PRj5LR5MrKkxXWwAgHSU7OgYnsu//kx94s76inuKRzXn7vuw6Bt9tQ+EUQEDaMiQUgiDqKxhmjRtypSeVuxuOGIPVaXZGErs5qexgV90jYwIqjR0ge1vC82n3uRy8b2/OWMxenpvZnDxcKXgHduuuaf6uwT+6bOuWeLBwtv2zq8BgGT+D9JQIDBlGwEyQfStVSgwWrY/hb86RaF5v9eFFsUv0IhK4Gd2QTngl/o33rMqE3quaNqsD5S44uzrLnOyZpOYXu3SeD5dlauDMhCeuwmmUlM1cKODUDTo+22uRgljzJpOYRMARpBjvEjnpSfWfVk5M5p1sIxN7gdUsoU65BQBcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sE+oMgpsORls7VD0Jt0BcLHY24DEGa6bQljqFn38EJjHbgNAIoHV38wq5M+JvUp6nOURVparTpy1KjxJTlkmovWDfsmUybK787PGU5B08/IA7vTbev5VRBDWZbJfBqgkyiGzWcFL1MVc/RNK18sFqYK4oY2ENW8K9aGGaZLNVz1nySQNAq5BHVS2LUmuvNz3YhwFqNI/ZhD8wJYWG1NZhQ/Y75w+7OAQYTY9DMfJHnHVX+dzDmks4dOjR5zH1g4qBXLSEiCKLGJHQjOZCm79x2uWuLXRgjNAxOi+Wzq1e1+RMzumPBVQxJKgVFcUBeZeWwwqS0BtBb3F1F5zXlckAGwYUlfExF4IMlHBKEGTtwoiHJGrQ6/1Nwmj1FjEVY5BLb8nY6IhhEVIhpkCB6wGwYjYyBl/62PddNPnlD4MznpY2FpwIuLtbZhPXLMDuYWKh4mWd/M3HNsySHmITAE8q3pDs5KeyJCVABO87fuf31GIDzEWHL14MU2/BdTSTNSQc66XBqng16N4cBzYthmcHCTL61XQ0JUsFhPUgQSQUO/MGFaJhhzTssLgPLqKgTP0hifXZm
T4sIsEkl7Tkp/C22WLhpwF7anRf+CEYbc+qU2rUVxhKjniNHApBp4CbfnX8N4w9JRep0OoEcKER6bqYyvf/Tv/5FINx75MS1v2S5FNxV5MCPC3du/ahti0+i9h2Hyy1hF+IjgkdVxuonr2xzUvcQYf6sT9Q3/mWEqFUUhK8e41kZUDyLWJJHJTYDyNVEiNUTauVh9rcHgZAc81i2dLfrTL99VJR2ZjF5C0zI+SlohdqWUHaM7AyycKYVNljwkVJS/OCIaPlHKLFgS/+t+z5z8s5TH9//VQqvYt/M58Y/AG+8C/V3FrKehXxYmhcTBKjmi9KSmnZuC5FUiAIZ1WiNmk9hnIPkhHjdhh0kEjYe4nWMPFuwhw1h7jOpwwgocoglTAmx03HOv2NMlC7NWk4JjWa4D5dPXG+11ftUlIjMpypHdREntFP3oDi5s59hOViaWizC986Bmhlw4I79nzn1PkLnXvzClj1Y8v52eFkZB+dRheeqNTc+000oD86uSuWnKj9OhhXHCDF+cvaOLGd7A0Uupiv85ixN1U1oaDhzbx4S5e0Zyy2iuduA50SGU9ZTtleVNkxJ46ZWxylPQmkuOLoIMVDBBrdd+9mHf2YHFZtMeQm2uZXk6DVzKzhJKtY4bGpxBVcgXNW81/TpBXkZPSx7EcFij2sXr8SjlUobpjKt3nikRg66SbSxfkYrxJJz2LHL8Z332mivvwl+VQJjMUNGepTtiAOADRGsnGc6u6yPYklHStzkmRLYsrRLV2REkANb0BoKq6QexoXtPDw24tTMlngDv+zBHj5/jnkW10P6giFR1AgfyPeQ6L0pKh6r+CPAuj/cJ6piuZYkXuTI3Ywaw/AoNRVlbxqoptQmbtSEpl0HGK/5kDaaCbXaOmpX9l3Lh2FQYr12MdTpVxxdU9EUbBBoHQH0fTHVbOswmhz6F4F+aTj2r4U2k2bouqGbuloRhM/Y0GlEO5B0M1rfyoOZ8HXTTSuCEBw8cGY3ETxpi54n6IaYkaf9COAFHDJFxeiWdaseV3d3b7/qDeXIt3xyTpTRycVHnypH9c9tkcIqHJmY1JAdUj0ZzSo8TsW9sFMRgbuER2IkizZJK4KAEgJvZDzrAnrl7pSjeO/EFZXUi426UVtxIq7lq092WekKRVT54WsZNq5XSNYO+7M6UXEWX7rUdmdH3FYDPaIHOGKLIH/Rkhm8nEO3d42SXgQhMHxFc1YsyJyVsbbIAXRdYydG1QdA4ZZgkogMPIuvddkzlIB3hSAolx7EBXQ8vJv1to227sjuSE5WiiX0AsToS/S2Vm3RbumH2h62UZ1YFcK8yVacgeZJic0qWgkdS/EWftbyBy/oRg5Kpx1BODaEkcP+vZhBQ8uXY3IQkSXsBkOiUOqYhOMqauBvDiejZ68yQ9JlMTm2dIiM8OAzY3CkkRcyUmxvKTtkNldCf1NrBJ44RkWRP23YHf1IBwwewhCcHASArgHeqMPrY6RZ5g/JwZtyOTgt/cHzYRTUq3lFU2lHEArFKMK6XzwtZsUMBkKNWXn8czHOkERJThLnZqw/oMvi4zBB1xyITsPIQXKA94+UF5zwKU0hzoVqqIdVpngklykGRSv3CCMH/rFTnYsvMS5ZI7/27EYMqWDMUP5KBIJdGF9Hq9XMGWIi4oBHqot5pmQF2Xkhc4tkSrjVvLRrB8T057Y+27XS6ygYRFHGx2hDNWw5jiA8EUjTCbfg7C+M/8P0vz75rfEtw7ab+hj7qmrb0axi5PBBjp03PU9naMmZUWWLA/feNFiq5G02t1REqRaW8mfFu9Y+8+HPXtz/2V/cbvnZv4NuGGLA7uq1SRKKyMe5dlhZaHbPsRZCPSPICilFJqwfMWKw6vwrjq7+GpmFwLOOgjNa1pXvuTCz+qz0f7XarFopMZ5akGALK/d1+7v0BhbR0uIjleoIqQSRGDECdHPiXT5bofom7QlSbTyoe5OEqimKIKq/Moa+qPH0p6dEdi8GUnXdBKpZ
RZq3njDDhm1hAdDWc2oth32n9zknJyawonUlw3tt5Iat/vsI3clBdPQnSCM2hPtVzYPqSTnjqSnL33tU1WyN5KT1udKaaGgB0E4pg9k/otrfJfKdKqar+fKZUV+ltdVqodBX6umlzKmpEG4114JeorVLmr4jSLuAMfkYBIiAIYjxA4NADAKGIDHgmEMGAUMQ4wMGgRgEDEFiwDGHDAKGIMYHDAIxCBiCxIBjDhkEDEGMDxgEYhAwBIkBxxwyCPQXQaBNtatJ1bRTU9Vv5rPtCOwrhP1L0FOx7XlrkmHv9MWqs5sfu8Ozv2sQhC9B91n7nKnDazugpIt+AWvaWfum0IWqTZ0VJ6FbtZtHuqqsKu36t+xBlXQ9LMNhst3FeJVgbfyhP0GqxGAllTAwKDIRgwiGTAVcIM/ae/RkGb15+yrJCXbE1KKTmeolLYSDxUBUN3xGkuRoEs5ighNNd/fmHZPEYAPQ5ZBupABLN1TQ6Z3uX2sIwYumwYhEd5CXXXjra8fOLs1XacZdqaatGZeD7sSzc7ngwIkTi/CjF6VsQhIOmLrsw98dGPHOOm4eI2+7mJawLMiZuwsXsbjkoJRYZVJIrPinhnjUDic4SXVzFw5XkwKdfFpZ26TniEK4M6DGeFUpvemBrPfcYN6/6LiyZHM2+thE90OT2A0Wnaf9i5nH+V1FlNir2n8wEpPu4GGU6Qi855GLRfm3B44fX1ADcbF0ZCOlcmFJRo5dH/neYC6z9G+49mroNY+8HDVtQiOZtetcTNYjg8CzM/mrhZO5ErZB1F57F7imMBgEHMdSgNmykx+et7ODWO4P10RRZc3ZXf+pXxOLbsN6CFMdFB/dOlp+LD8YlKK1S1jlcEsgCc7hUp6XIt6Mh1fUCjk42tEEaiCWofQxDLezLhsphbVloUAtGkvqnqNgMXKUMyMkx3XIgKtocUw6UuNZ8qpWEywTSL8yBqKgIlLDbRMF4aB0q7w46BdnR5386Kw7sm0ag/c5IjHx2lblbfR6vQhCcvBeAysALz68dUv5sYEBOx8E9kC1tk1iBtyEYYe5eFYJa7vMwG+QY2vNmkZBXXE+DK4iCAfUz0k0t3hsCv+avYNgswrV9DyyATnELGqLiCDJ2KyQq01foZ7EaE3HxrJ99hAybSAqhmTwFi6MW4HnZMYuPdsmodqajV4EoWq2L0tntg0rcgzhWRTTMuzJFQzchPGDlOCTFejHWfu6E0F4E0oPQsKNiHSncS+yg79aTZiGMNINC2dzVGh39INuLBiNJkQOZZrGI4BwHN9bmhu13Fw5M7x9Wrd7En0IQhK4iNYLebf824FBkWWtj//hrVxTLoXbRdqvi+GDZbN43pi2UQxitcwJfsH/riSWvVx7NSkBsLHtwF+6OOoOjMzjXgYrzjZOtCYLT7xMrycIaIb65/PZYNZ2hAuHaoEcKzVn5Zb2trb8lb/N95UIgAxoFlu+lwnKiwOqRbzycJe/60UQVEbBvEtqdKtV1GVzbNbiw3AYeJVMt2JhLeT1IYiSBE9xK2bRq1rG6vv9km/k29RsaBNY+hBkpUK6VSMrZTPfNxUC/dvLbFOZ0SjbKQRUe8ZU2J2C1+Tb2wjwkYHESyeJ5+rq9WZvq7NGesP7NYC0/2f4OBZA9xnWfGgg0NlJLvAFz+N4X5AHeK0+0G4//i3kiN4+y/Px9pn1WkClM5fCnTAHfxvf83RGzDpzVW882e0lj0epTyCCyB/jJVYOlQBXclJH68xJz9OUBlyYBWsd9oM+eqIcSaVqWrpOETUu1yfuff+B44APATTJQrEHbTcTfAPaPo1tBO9rSj1NEr5wYj8sXxQxXawHJUO6ROY0H+1GQDECT0JlCV1EFgA+e1s3H7DVG/RucIxlht0u8K0MrUaw4xnfydxnj90y8zjCyR04IQvVhnEaa142TxhRGDhT21AmO/cogNnpEL/rTuHJEvdSogyCzHcD5rqF7asTQ8eSgY81BrEaGHsX04aRHRtU
lWZkU5+faW4skwNTyogfg+jGg1Vp7Dsf+edXPKneg2z5pwv3gga34pU/2XMJZBsCMTJwMj7lSm2DkBgswC7TkKDaHyfJ03mcnRX4wCGA/BV7Bu+a/Gr0YF4mdRIB+jFrMwxn8yvPgy5YyhljVNihsu4mF6we2pvNG3TEVEPl6Aed3+A3KAeEkIMQdztkxuhIcfvJj+9jy8qyJyfDpWnGb5v+ShCI92Df16HgM1AQUQSDjdALtePbinJQPkDiX1WFMIywp9+qTR1j8yk8kdhiZXUxC3JMs2mFDBoLPyzPpBYQIBPoR2iiB95ziCbnYZQSMgwf/PBw3EZDkhrsGKx6YKt1a9iRtrObUNEOZUgfAeEP+PymI5z3n/r4/q8SDErlTqiJBCDdpCW2TciHsP+h6Tu3Xe7aYhccb9iL3JAXdCqh+hAOWGjnikXvXO4W4PROWUYkkBjrsEFincWET1RcGLCDJiHE9MEKSqvIobgTnmb+poKAalMxasDZPAyAQoVFZ1eRhHVe1WprhaEbojKWwZgsL/6XLZwv43ERHhqlkFj7YnAeKtUFz6s8+5uJV6v1Ojl605qA90HoyAHxHTusSSg0Ib2tt53/PcTjlno69++vege6ugdYPHkJvq5WKIR0sf6uDjJURmaIPTl1jTZVgWQBAzjDgY/u2NESrPEYoEbjBBt59Ob97Yl/ecVP48/u7NFDk8dcOXEQq/WGaXUNDXIo/aZUu6x6Tuc/T1viGEo5yPKzfP4M6iKgAG3c/6B1VafHK+t0XlpTQiwCtAITbwvpR0mtXb5sxGN51YgWfB9nHZ6ynHOnj4kde88hs2bHXjKnxtJUAQ+mVpCDV68miMoP9XVBPcVqLPdWzgaWBxHpmAXu0gkWvy7/ieOHOgkn85y485ihSWkiUDVf1UK1yqaxw3PAEXXPchT+B49Qo81qXZXW/g0IklbR7SnHkKI9OJpcNkaAj7hMMggYBGogYAhSAxiz2yBABAxBjB8YBGIQMASJAcccMggYghgfMAjEIGAIEgOOOWQQMAQxPmAQiEHAECQGHHPIIGAIYnzAIBCDgCFIDDjmkEGg57uadMOE1d5FvdDNpSorceoFebthz7gyDUHi0KlxrB5H4zkcrIIP9MYLPzPFYRWx953GwqL1dlGOZDhSOMKRdv5e74Jzwh1X3fuYWTjDd0yPWQx2CGWxLKxgt5IvUc7mIw4BQ5A4dGodg8ejOk50NnbUR7d99FDlh+Xt/vnPuSKUtfdkUwuLqmmMjt794Yvilu9h/IRaO4WjPhP4GpJiFEMud/rhxMccfJFwUS3NN91+Q5A6TE4mKIciMZDg7ZyBPouf9Yy75ozMuVIQ7Pn+NVd9uiy9RQzrzGDJsmih6joEAMWk7dqOv1Ap57cNfPKlf3xtyc5uwSD8ASyuAdFUL/ENMwrsrJzJjXi/G9u19OOhS0rjGL05HnjCkGRDuNbtNARZB8nqHSvJAWI4fiCHQRMO7KnrAUc0zEVgHOpLMNzhFs5KgcGcGAeGHBJjUCQLGklY6hB0zEu3PC/2nvlODhN/sfzh6IyYDy7X6Fo35Hd4N156/fwPrnjj9Kn8aGWXV7HLkEERP+bqzX7IECTGA9aQwwU5tuB0TijGKrsu91YjIkMycRjnDNpmuCmAx0fjiOvKBEVhDUBcwouEXcmMb/VtB7POqOZbrI+rliAuy3jzzt4z/zm+Y+bMwH37//K5h4Z3lnZVik6Zq87GYLDZD9VVC25WkJTjqPpe2iQHnDkL11b3AvViAp9e6X/AmzU/1/SjZ8Lr69wgBtpSKmrweojBgZe8yeFn7Y1EZGJTa2lwj7d9+nj+3ae/tfPK0rw762SCDI7VR1KVzab7YwiSZHLW9ZJzJoEcqLHXOHzS1WuOr+TKmkP1/Fx3OV27vo0ksoOKKA1c7l3ywkODb3vuV2PP2ZifGWldtvXIsknOMQTZwNDKa7if0QMt/wD3HHCi5d0bXFLnrjZkUWdJG5/G8vFg
2B6Su5//9fD+0oI7B5Kwnd1tyTaWt/t7DUGSbIClIdCewTuIqK2SdL7ux/FeJHBzcmjxD5krijOZcyAIoosJIjXsZgiyATBrvIUz4K/ZtcFFPbJL3bRjCWHXXxLDlUWnOgFU3yjYZjsYgiQAygeyaH70mf/wbsrDU4JqwyrmTXwCPv1+2BAkxsLL7mOa6DEo9fch7QgSoFeGbpC3L4Jop5pWULcP5/appQ9B1EyVFu6IOf28Srg37p/QH+oUadai/fjkucUsNLs8rDhQNapbIvUiVRMJ9SFItbtTYL+gCTbtEwP2R9eQALM5s5Nj02EErOC1DLF9RhBqReXkeQX6kSNQsWmY2mc35KQHQfCYyCoUQkREcAJgFdHSYkdA1d7qA28QJT/w0B+Lr9Bb8G7GVIkXM5g1vV8S4VCP0UURyp1Sap3aSzW1MLseBCEq+6YUILMl6ycA7AS4MYod6HTKbhVhlckT0tooEgpWMvF7M4kmtpEJXr8HMxUPC122J2UCv0QP0qWWbUErmrYClLEmoDxVLA0+qPLaV2gJ9xbkWXepPgThQj6HDzsHjh9fgJR3wTu5jBqWghNYIDKMJGGIUS0MfO3sZ7VIltlMq6ha/4EgNsixOOd5ZbxxbPmmCtWFcAO/nPX9pahH7zqj1r+DGMIFlIKhpiHpUvhOKARsa0msCYjZ3KX1xWfufseiODzlyImY/vv1K9eWM/XqzTs1hV53QuyW8rtPve5Vn8D73U8BwG0wGlYr4tolKqVVu6CSRuWPZbqq4Z7+jUTviU08AcTASA0hZrzK0rPF0hz2LUfC2IvrOAgJRN4vzwciZ5dtewA/satKyYQMACTOoGxUBJhKrsnHEScJF7br8DJ8bEJvVeSw5Kd/9+XCDwAXD6YoS7JOehGEzYYQJOuK/3v4S0+8/rqnAOCt2H0AqqjFVZbhTdat5TM4dAP/bce2o0gLJ1I2TMgaTliR0r9QKi8+Xy4vkFUkS/tcEJQAIQa90qzjZLyy7QwENvqM1EPeKskBtS/sMThpHlnZYHT7xIuFB8WwJIl7DkseBzW/SHKoSyBTdDQ2hzQP6kUQag4rV0nCSHLiuuv+ezRn3Yie3geEHWzHk6CMGnTUYZTwPgasEMVi4F+14Ht/BmaQL4wotTkK6/JGnCtZzlf88lLge1wiq73kCBUnSPS0vFdeyAq7WLHtbCBsl02wOGhCFdh1HmtSFud+YNnuo1i3Oe9Lu/awxLgMGzgGeBi2sMgqnlb5wamiP/igalZRZg3JQdXYZmhAxRRPJWiFgm0dPdrQ+It2Szj1sive6DrOfXBGD0ixV1+sA7J8IsqQQ2JUf/OzU6lBI4JbjBfCHZx54i/+9Kz8Safkqidf3nNYU4c5jEBLR9TnJn0tmgSM5JjEoCLcvKPC5OCiROdcm02zv48dOqSia9Z183h5yVV4sYXRgI4ft2Hd7OWokYbVGTXq30hwnm+J+fE9ajXZyUjXZrFq5DqIKsTkpE1icDVZebTAoJsGTI2IuXyufk2sZdGiL2qZapoURuUufK49pSO/Dx5U2aJ2his1VoK21l6jRiBBeaQ3YwOqDWq5JrO6fwKdI4x5eGrZA0l/gkQgpl3LnIv4iJoZDOkVl2/M42xMJcErzh07xjo8RSVTLKoxSNadrW8Ta52oZodBIH0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAE6SFjGVHTR8AQJH3MTYk9hIAhSA8Zy4iaPgKGIOljbkrsIQQMQXrIWEbU9BEwBEkfc1NiDyFgCNJDxjKipo+AIUj6mJsSewgBQ5AeMpYRNX0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAESTAWpjjjpGp9l6hT+L/vVGurQoYgteAshAeEDOYxKSdW
M8CEz+G81LWu6In91IG6UCelG6WOdO0JBVIW0hAkAXBfus+gqj0Ph8pgmt6ejyVKB+hCnZRuCfpv9sMNzpy/ieDiJNlR4+pbV+/+D8zt/H5ofxZ7s73aNEH0gL2tMj4vwfJtX3/PY098SFl0ha6byMJ1qWoiSC2YQI5jh0Q4ubcQdyOCXIB7DYEcJTparct03a/IAdmVDkoX6ISklnnoy7us9ljCRJAEHCex1McEpur/ztW7Pwpa3AVulFELz+EytuNZwehOFs7bzqUGAkQ/riabxe9b3/XYE1+p6pYAwaY+bAiSZP4VzY9vX7PnA7gLuR2XvAS0KIEoWNvb6uoKWEnigwxY+4frEFpcLOdprOVwx7sfffxedd0K3RLz2aQnGILUYfiVNe33r7lij2c574PDvQmhYw8cbriOLLp2Cgg9j2bh4yDyj13L/8afP/rU4xRmpU5dE64HCv5/TkFf8RZsb3gAAAAASUVORK5CYII=",
"attach_user": "rushabh.jpeg,data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD//gA8Q1JFQVRPUjogZ2QtanBlZyB2MS4wICh1c2luZyBJSkcgSlBFRyB2NjIpLCBxdWFsaXR5ID0gMTAwCv/bAEMAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAf/bAEMBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAf/AABEIALIAsgMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/AP7qv+FLfB3n/i1Hw268f8UL4X+n/QK9fXnj2JJ/wpb4Oc/8Wp+GvUD/AJEXwvxn/uFH8zx26816Znr8x646dPbp/nj8Vz7nj2Pr06d+grs/tHMP+g7Gf+FNf/5M8f8A1eyD/oR5P/4bMF/8oPMv+FLfBzn/AItR8Nhzj/kRfC/fHrpWe/8A9eg/Bb4OAEn4U/DUAdSfAvhfjp/1Cuw/+ufTo/G3jnwV8NfCmveO/iN4x8L+APA/hewl1bxN4y8a69pPhXwp4d0q32i41PXfEWu3VjpGk6fDuUS3uoXlvbR7hvkGRX84f/BXH/go9/wRq+J/7OXiTQPid+3x4j+LenJbSaXY/s5/8E/f2n7Wb4ifGvXdYElvp3hPWLT4Ravdf2vol7JEIr1fHWtW/gSwiVZ721n1KfT7a+P7RzD/AKDsZ/4U1/8A5MP9Xsg/6EeT/wDhswX/AMoMH/grF/wWZ/Ym/ZO8M+J/gF+xp4W/Z/8A2mf28vEQvPC/hrwd4N8L+D/F3w++Bt/LFLbah8QfjZ4p0nS7rw1YJ4MfM6/Dsas/ifVdZSy07WLDRtJnuNQT+Qz4X6B4p8D6NqcviDxv4h8YePvGOv6r42+I/jTUL+6W88V+NvENw97rWqFVkSO2s/tEjQ6fZQxQwW9uisIhNLcO/wAwfAb9nCz8KeOfFfxStNI8UfCPwn4i1u71PwB8A4PiNrniqHwX4ek3Jo9t8QfE6ppEfjzxRb2OwXksuk2Wki8e5uBpdsZLbStI+uvEfiPQfCOjah4i8TatY6HomlwNc3+p6jOltaW0QO0bpHA3SSOyxwwoHnuJpEhgjllkRHyq4nE10lXxFesou8VVq1Kii3o2lOUkm1pdHVhcsy3ASnPA5fgcHOpFRnPC4Shh5TindRnKjTg5RT1SbaT1tc5Jvh6uueMh4+8eaxqvi/X7MtD4a0/UNQvZPC/g2w3h44dE0OSdrO41SVo4rjUfEWpw3GpXV7HHLYrpNlFaada+jav44vdEt/PvNY8SS53eXbaXHr2s6hLt
ALCHTtHivb6VVJUPKluYYy6ebIoYZ/JT4zf8FFNTnuLvRPgpo8FlZI0kI8aeJLQXN9c4JX7Ro+gS4tbOIkboZtaF7NNE48/SrOUFa+GNc/aF+OXiKaSbVfiz49cyks8Fl4l1PSbLLEk7NP0m4sbGMcnCx26qBwABxWB3H7reJv2ifjPa+Yvgn4EfFzxQEyFutb8S6F4OtZz2khhn1TV9VEZGCBc6ZaTdmiHBPzH4y/bc/ay8G/aLzXP2f/EGhaVCCzXl9e+L9SsbdFGWafW9Pgi0voCd37pSoJAIBI/J3/haPxMHT4i+Ov8AwrvEH/ywroLD49/G/TCv2P4u/EiNVxiJ/GfiC4g4xjdb3N/NAw4Aw0ZGOOlAH6A6X/wVR8cRvjW/h8LyM4GdK8eavprqe7BbvStVVx3CFkyeC+Dx6VpH/BUnwvOyjXfCnxJ0xSQHfS9d07WtnPJC3V7oRbH1B9s1+Qfijxp4i8Z3K3viS7tNRvwSZNQXR9FsdRumIwXv9Q07T7S81F/R7+e5cHkEHmuWoA/oe8Gft5fBTxnPBZp8TtX8M39wyLHaeLxquiRhnIXbJqzSXGgxHccHzNWQHOV3AE17V450Cb4k6RHaT+PviVocMsO+21XwJ8RfE/hqZ4Z0DK//ABKdSXS9SjkVleM6hZX0RQgx/Ix3fy717j8OP2kPjT8KoIrDwd451O20eEjy9B1NLbXNFjTOWitbDVobuPT0c8v/AGabN2OWL7uaAPtv49/s7/HT4P6TqnxQ+H3x5+I/iHRvDxXU9Rg1LxZ4isPFmk2yzIrX8V/aaiLPWEtS4mvZFh0qaOBXkS1nVHxl+Kv2orj9ob9l7xH4b8XeJ73Tvi/8PW0XxHZs2qXFkni62029h0+/1fSwk0SjUhoeoak+s6VB0kT+0bOEWLSw6f5X4g/b3+K3i7wV4o8EeJvDXgS8svFPhzV/Dt1qNjY6zp2owRavYT2El3GDrd3ZNPAs3nRILOOMyIMjbgD4boA+v/gB8f8A+yvK+GPxX8UeMo/hrqvifTPFGneIdE8S6zpniP4d+NdP2Q2HivR9Usp2voLYxqkGpxQB5bZFTUrFBPDcQX39Ff7D/wDwVi/aQ/Y/8QxXHhHxx4K/4KRfs86VJDceOfgJ8d4PC/iX49aB4faRftGs/C74zX2i3XjZ9Rgg2ppui+L7fXvCs9tCdP0zRH1O7iv7X+Y74J/DPwB8VLnUNC8V/FbQ/hTq9r/pGk3fiDTHubHxAkyKjWf9pXWvaNpWmy2EkW9Ypj9rvhe/6N5/2Z44/wBjfgp8Kb248M+ENR8ea3q+v+IvAGsFfCHimLWvDur/AGzSbGL7Ms3h/wAV+H9M03VdT8DeJ9PuDDeeHfFf2u9geJ7WaWRbKxvpNqWJxFDm9hXrUea3N7KrOnzW25uSUb2u7X2uceLy7L8e4PHYDB4x07qm8XhaGIdNStzKHtoT5b2V+W17K+x/pxfsMfH/APYW/wCCh/7Pvhj9o79m3wn8Ode8H63LPpGv6Bq3w/8ACOm+N/hx4106K3fXvh/8Q/DyWVzJ4f8AFuhPdQG4tfPubDUbC5sNd0HUNW0DVNM1S7+wv+FLfB3IH/CqPhtzn/mRfC/b/uE1/nT/APBLX9sS/wD+Ca//AAUV+G3jebVpNJ/Zb/bM8TeH/gP+03oDSmHw54e8d65dTWnwe+OJgJW00690LxLenR/FmsuscCeE9Z8RTXCXGo3ttLF/pSA5A5PPt/8AW/Xpx1652/tHMP8AoOxn/hTX/wDkzj/1eyD/AKEeT/8AhswX/wAoPM/+FLfBzOP+FU/Dbv8A8yL4W/8AlV/n8DQfgt8HM/8AJKfhr0z/AMiL4X7f9wr2OR/KvTcj39AMEf06e/Sk9OT6dMfoRwP0FH9o5h/0HYz/AMKa/wD8mH+r2Qf9CPJ//DZgv/lB5n/wpT4Of9En+Gv/AIQnhb/5VUV6Z+Lf98//AGNFH9o5h/0HYz/wpr//ACYf6vZB
/wBCPJ//AA2YL/5R5L7j8Qv+HiHx6I/5B3w65/6l7V8nGO3/AAkmTngfz4o/4eI/Hrn/AIl3w74/6l7V/wD5pM/55r4QyfUdSeccE49c+3PXOc9CaT8Rx2xn/EZ//VxgV/U/+pnC3/Qiy/pb9wtdvP09V66/5Yf8Rk8U/wDouuItdn9da7d4776W16bH3Tcf8FCfjndwT2l3o/w2uba5ikt7m2uPDWqSwTwSo0c0E8UniNo5YpY2aOSJ1ZJEYoykEg/xC/8ABXD4KeGvh7+3n8DvHf7PPwS+F/7LH/C7dH8T3/iXxl4Esb0fCjxz4x0y4Nx4g8M2nwemiuNF8C+Kk0lrK/E/hXX/AAxpHiePWpJLaystdtNRvb7+pHPGOP19/fHv6846cV/OL/wUl8b/APC0/wDgoD8M/hrbym58P/sw/BnVPGuqxqxMMPxH+M13Dp1raXEQyDNB4H0nTNUspJBuiF7K0QUuzH4zj7h/h3KuGsXicLluEweLlWwtHDVaFGCnKpKvCU6fNJS5U8PCtJuDjP3Lc3K5KX7P4BeIHiLxT4lZXluacS5rnWUwwWa4vMsNjsXWlQp0KOCqQo4hQpypxnOOPq4SlCNZVKS9vzOHtI05x+cfGnxC8G/DfQ59c8ceJdI0G0tbSWdmvbuG2nvnghZ3g0rT5J2u7+7lZStvY2a3VzI7JEgkYg1+Cn7R/wC0p4q+PviNjK9xo/gTSrmU+GfCqS/u0Ubo11bWfLby73WriInLkvBp0MjWdj8rXNzefrP8ffhZ+yxpWj6r8QvjD4Z023Ylg2owaprtjrusX7I7w6fpkGmaraS6lqExBMcGx4o4w9xcNBawzTx/hZ4zv/C2qeJtTuvBHh+78NeGJLjZo2j32pz6zqENsuFR7y+m5kurg5leKIGK33rbpJceWbmb+fz/AEAOVr6L+Bf7I/7S37S139m+B3wX8dfEG3Wf7Nca5pmktZ+E7G4zgwal4y1h9O8KabMOT5V/rFvIQGKoQpx/Ub/wSw/4II+B9P8ACnhP9oP9uPw+3inxVr1lZeIvCP7PupCW38N+FNNuo47vTb34p26mK41/xJcQtFcS+CZ3h0LRI2ax8TW2t6hLc6do39DFvpmh6PDHpPhrRdJ8PeHtNVbHRND0PTrPSNI0rS7VRDZWWnaZYQ29lZWkECIkVtawRQxgYRFFfm+eeIWGwVWphcqoQx1anJwniasnHCRnHRqnGDVTEJO6clOlB2vCdSLufrnDnhVjMwoUcbneJnltCtGNSng6MIyx0qckmpVpVE6WFcotNQlCtUV7VKdKS5T+Nb4S/wDBu5+1P4sitb34s/E74WfCO0nCNLpuntq3xH8T2YOPMS4stLj0Lwy0idB9j8Y3cbH/AJaAc17l8S/+DbnxRZeHoLn4QftNaJ4j8Uw27fatJ+IfgO98JaJqFwMsr2mt+Hdb8YXmlqw2xrbXOh6mC5EjX0akov8AV/0/z/n/AAFFfE1OPeJZ1VUji6NKKd/YU8Jh/ZNdm6kKlVr/ALi3W6aep+iUvDLhCnQlSlgcRWnKNvrNXHYr26fSUVSqUqClfX+ByvZpx0P84f8Aao/YQ/ae/Y0utL/4Xt8O5NC0HxBfXWneHPGei6rpniXwfrl5aRC4ktbfV9JuZ20++ktt91baZr1rpGq3NtDc3EFi8VpcvF738J/+CPH/AAUA+MPhbw9428P/AAXt9D8K+KtHsdf0HVfGfjjwV4bmvtJ1OCO70+7bQbnXJfE9kt5aSpdQpqOiWkjQMshRVki3/wB3PxT+Evw2+Nvg6/8Ah98V/Buh+O/BmpXOnXt54f8AEFmt5Yy3mk30Go6bdqCVlhurK8t4poZ4JI5AA8TM0Mssb+goiRIscapHGiqiIqhVVVAVVVVAUAAAKBgAYAGBXr1PErMnhKMKeDwkcapVFiK041ZUJU0o+ydGkq0ZwqNufteec4Lli4L33Gn4NPwhyhY7ETq4/HSy506TwtCnOjHFQq3l
7dV60sNKnOkkoOj7OnTqPnmptezUqv8ADO//AAQV/wCCg6qWHhr4XOQOEX4m6UGY+gL2iJnr95lHB5rybxj/AMEZv+Cjfg2Oa4l/Z8ufEdpChdrjwf45+HfiKRwByIdLtPFQ1yZwQRsj0tnJHCnIz/fpSYB6gGuWHiPn0ZXnQy2pHrF0K8dPJwxSs/NprXY7avhJwzKNoYnN6Uuko4nDT+9Twck16NO19eq/zNfGH7NP7RXw/wBVOieN/gR8YPCmqhyiWWvfDjxfpss53bQ1r9p0iNLyJjxHNatNFJ1R2HNe4aH/AME9P2q7z4DfGT9pDxR8IfiD8Pvhd8HvDWh6/PqvjLwP4m0W88ZTa94t0PwzFZ+EdO1Cws7vUrDR7PVb3xR4n8SpE2geHtB0W7kv71bq6sLa4/0W1LIyvGdjoyujLwVZSGVgQOCCMj8DS/tJeLvivov7OHxG8UfAz4SaH8dPiang++Hh34WeI9WtNK0TxRcXcZs9TstQN6nkavb2tpLeXMvhk3GnyeJ47ZtBt9TsLjUIrlPSh4k46vLD0YZbhaVSdehGpUninGm4OrBVEnVhGFBShzRdWpUnGknztPl18p+EWW0IYuvVzfHVqdLDYmdKlSwKnVjNUZ+zk40ak6mJdOpyz9hRp0512lTi1za/5Y1fp5/wTe+I2tp4p8XfC26up7nw/c+H5vF2lW0rvJFpep2GpaZp2oLaKTiGPVbfVYpbpB+7M2nxSqFlklMv5yeLLy+1DxR4jvdS0Wx8N391r2s3N74c0vS/7E0zw/d3Op3VxdaHpuinnSLDSrmWWys9LJP9nwQpa/8ALKv0U/4JU2/wx1v9o+bwZ488WT+Atb8ZeD9b0bwR4ruFt7jwx/a0L2Wt3OieJ7WZreW2h1C10Uy6brNtf20en3NvNDf217FewSWH68ndJ7XSdk7rXz6+vU/BpK0pJNtJtJtWbs7Xaeqfk9tj9N/i/wCAG+KXwz8ZeA4LeS71LxDotzbaFFCpef8A4SSDbeeG5bdVJkM8WuW9hJGIyJGZQoIyK/sI/wCCff8AwVw/aE+O37Fn7N3xM1IeBtV13VvhdoGjeKtT1DQtVl1DUfGPgpJPBPjDUb6RPEESm71DxL4d1W9nxHGN85wiggV+ZnwY+AXwOg0nSrPS77SvGvjD4eeLdK1rxF4q0uSOYSeKLew+22mmR3flzI/h61+1wzJYWc/lPfWEbX7tf293APNv+CQEn9l/sweO/h1jYfg7+1L+0h8NGtzjNqdO+Id74g+zlf4Ao8S7tmf493cV+h+G2Cy7Ms8xGCzPB4fGUamXVatKOIpqfJXo18PaUb6q9KdVSS3sux/PP0k874i4b4Hy/OuGs6zDJcXh+IsHhsVVwFZ0XWwWLwWPUoVbJ83LiaOGcG17rcrfFc/pB/4eI/Hv/oHfDvj/AKl7V/8A5pPY/kfSk/4eJfHr/oHfDv8A8J7V/wD5pK+Ec+uO2cc9OnQ4Pfvj17CjJ9uOh/H29OcenbtX7j/qbwt/0I8u/wDBHp/X399f4d/4jL4paf8AGd8Qvv8A7bLyvf3dNLuz8+yPuz/h4l8fO2nfDn/wntY/+aSivg/P+0v+f+BUUf6m8Lf9CLLv/BC/z8vz7sz/AOIzeKn/AEXfEP8A4Wv/AORP6Uz8Avgdz/xaL4ajnH/Ik+HuP/Kf+Z9cdO6/8KC+Bpzj4Q/DUcj/AJknw9x0GDnTvXNet5xnJPPOcehA9OowOeOvSlyB3PJHVeeMHHQc+n6dK/lv+1Mz/wChjj//AArxH/yzyX3H+p3+q/DP/RO5F/4aMv8A/mc8j/4UF8Def+LRfDXtz/whPh44H1GnY59f0r/MK8c/EDQvjv8Atkft6/tJeGrLTLHwp8U/2pvHnh74cx6Pa21lpj/Cb4Q3TfDr4c3Flb2qw20Ud1omk+fKluvltcNJIdzsWP8Apo/tY+IfHHhL9lj9pbxV8MbWW/8AiV4Z+AHxj8QfD2wiDLLe+ONG
+HXiLUfCdpGURpPNuNettPiTYpfc42KTiv8AK7/ZUg0q3/Z4+FI0ebz7efwxHe3U24lm1m9vbu78QCVi+Xkj1ybUInZjuLRnPpWNbG4zERUMRi8TXgpKShWr1asVJJxUlGc5JSUW0mldJtXs2deDybJ8uqyrZflWW4GtKDpSrYPA4XC1ZU5SjJ05VKFKE3ByhCTg24uUItq8U15V+2RL8APDvh2x8V/F3wlJ428Tyw3OleCPDyeI/EWlT3c0eJrl4xpmrWttp2m2zSW0msamtpLId1rb7Lq5ktYD+dn7CHwqtv2gv25f2avhqmj20GieL/jh4Pu9a0O0N5dWtv4M0LWo/FXiyxt31C5vb6WGDwpo2rRxy3t1dTbEElzLKQ7N61/wUe8I+L0+IfhjxrLbXt14Kn8LWmh2V9FHLJp+laza6lqd1e2N0yborW4v4ru1u7aSYxtfossUXmDTnEf1f/wbs+D9O8Tf8FINA1m+WNrj4ffB74p+MNILgFl1G7s9J8Bu0WQcSf2X421IZGD5ZkGea8LO8TLB5PmeKjpOjgcTOm10qeykqb+U3FvyPrOG8HHMM/ybBTt7PEZlg6dW/Wj7eDrKz3bpqdl1dkf3reJL0WGi30y/LJJF9mhxwTJc4iBXpyiM0g6H5DXhijAHH5+pHP0//X616P4/vCTYacrf37yYZOeP3MHr/wBNzz7HryfOfwP54/r75/8Ar4r+Yz+yKjblpsru17tvT09Nfz1S+v8An+X1P40f59KP8/5+tH8/8/5NBCX36dNel7676flvYKTjn1+vPf3+uO2eetLR+H+f8/yoD7vLTpp/X3dtSiiigLenlpstP8vlppoJ/n9R/hz/AFzXp3gK+D293prtlreQXUAJ58qUBZQo9ElCk+82cg15iPpj8vb0/L8PpUSeOvC3gO+s9V8U+JtB8M6ZI7QXF74g1jT9GsxDMVjZ2udRuLaLZDI0crHzPl2gZGQKaTk0optvRJJtt9klqxqah70pRjFfE5WUUtN23ZfN/k7/AMC3/BZv4C2/7P3/AAUV/aA0LSrIWPhn4g6zYfGXwzGkflQm1+J1kniDxAlrEAI4rSy8cyeLNMtIoh5UVvYxxosYXyk/OLwLqXivRPF3h/W/BEOoT+KdF1O01TRo9Ms59Qumu7OZJY1+xWySSXUMpHk3NvsaO4gkkgkVkkZT/R7/AMHM/hmw1H9oH4BfFjw/JZaxpN78Mta+Euva5o1za6lY6b4s8DeJ7rxj/wAItrF1Yyzx6b4gg0T4l2WrrpV+YL99Mv4LyOF7Y71/m18Ma1rWg6zZX2heJr7whfedHENfsL/VtOk0+N5F33Etxocc2qeRFgSypZ29zOypiK3lfah/pPhrFSxmQ5VXm+ap9Up0qjbvJzw96EpSvrzSdPmlfq2fyJxjgqeX8UZ3haUVGksdVrUor4Y08VbFQhG32YKsoR8oo/sR/wCCbH7UENnYeILz4seGfEPwmXxB4ftrnUNO8U6ZeWsSeINA86aI6SkkRvZrPVLK81AWYubW3vWuYrWxlgeV4JJv3a/4Nm3+Fnxcf/gqd4W8Q/D3wrrU+jfty3Hxf0qPxLoGkaxqmmaF8e/Aum6jY2nn3VrO0Nu9z4Hv7oW0T/Zkuri7MW7czt/Gn+zd8avB0uk6f4c8TftL6d8VPFuqS21tptpf6E3haW2nkG0adYvquk6Xr2vXMsjbRd6nKbifC7LOE8H+nL/g2U+LFl8OP+Ckn7a3wC1Nkt3/AGl/2dvhF8b/AAtK7bI59U+APiDWPh9ruj24Pyz6jcaX8SRrTxYMiWOkzyjCq+foaNeth5+0oVqtCpZr2lGpOnOz3XNBxlZ9Vez6nyOLwWDx9F4fHYTDY2g5Rm6GLoUsTRc4/DJ0q0Jwco3fLLluujR/aUPgF8Dv+iQ/DUnr/wAiT4e9j0/s/uD69enpR/woL4G8f8Wi+GvT/oSfDo79f+Qdx6c/z6+uZx3PTsPp0+U9frjnijOM
DJ/L9RgH147dPx6v7UzP/oY4/wD8K8R/8s8l9x5f+q/DP/RO5F/4aMv/APmc8k/4UF8De/wh+Gn/AIRfh0fp/Z/H0or1v8W/75/+xoo/tXNP+hlj/wDwsxH/AMsD/Vfhn/onci/8NGX/APzOfiL/AMPEvjz/ANAn4c88/wDIva51/wDCn+mP06cH/DxL48/9An4cc/8AUva3z0/6mf8AU46e1fB+P9noTwT9M9hn0BycE9ehpcH09e47jHpxntj8eK/qT/UvhX/oR5f/AOCltpv7/wCHr5s/y4/4jL4p/wDRdcQf+Ffp/c8vz7n3bJ/wUP8AjvKjxy6N8NpI5VZJI38O606OjAqyOh8TkMrKSGVgQwJGCK/gqj8FXHwM/aB/a2/Zss7Ox0iz+HXxY1bxn8MdNSO4i0bT/hp8X4pPHPgrS7GIzSXEmm6B9vl026aKaSSOXfA0hnRs/wBdG3/Z9uvTknOcfl1Pr7/zwf8ABU3wYvw9/bY/Zs+MNnD5OnfHT4Y+Ofgr4peMbYf7d+HV3B458KXt0RhZL/ULTW73RraU5k+y6d5IOxBXwviFwjlGD4enmGVZfh8HWwWKoTrSoQ5XUw1aX1aUZLmbaVWrRqXa91Rl0bZ+5/R78WeLc38QKXD/ABTxDmGc4POssx1HBU8fWVRYfMsHTjmFOrTlyx5XPCYXG0ZR+3OpT6xSf85n7YPxz1Hxzc3Xww+IPwy/4Rrxt4G1hm0/WfD/AMQJtV0RzdRwmVp9FfQIrfU7XUbExTWcz3Nlq2mysqO0O7UdOuP2j/YY/tr9kz9lnSrfwZ8EL74B/tOfGX4S+INF8bfFXxnoV5L8XNVvvjP+0f8ACP4Ofs1eIfhvFr87yeCPBg8O658UvG+o6PPo1ra+IdX+EcPiSW1u7GDTr9v5w9H+Huv/ABY/aA0z4VeF5rCPxR8SfjBZ/D/w7cazejTtMTXvF/jOPw7pM2qag6y/YrFdQ1C3e8uykht4BJLscptP+jL8RvgX4I8Lfsx/CrSv2lviv8PdQ1P4NeGvh9D4y+PfxXj0bwsviLxH4D8P6npGm6/qHiK+1CyOmOL7XfEN/p0d3e3939r1a6uA8uqXd5c3P8l8b5nSwkMtwVSHtlisRKpUw8XUc6kKKUYRnSivZ1qMqlTWnUbvVhSlCEnCTh/qD4a5LVx9XN8whV+q/U8LCjSxk40lTozrycqsoV5v22HrwpUlarRS/czrU6lSCqRU/wCNb4tf8Frf26bf9oX4ueJ/hv8AG+Zfhte/ErxdJ4K8F694U8EeKdCsfBUGuXlv4Z0yC41fw7cazDAujQ2TSy2GrWc01w884lVpnz9zfs5/8HGet29xZ6N+1T8F7HULEmOKXx18F5JLHUIEBCebfeBPFerXNpqDsD5lxcad4v0lYwrfZtJlLrGvf+I/An/BMPU7690X4Ufss63+0hawsYG8Qfs/fBX4m6p4YlkbC7bLx5rEHgfw3OVJwLqw8QTWhOWhu5FXfXzX8Q/+CcHgX4n201z8J/8Agnj+2z8N55AXtLzRvGnwpe0d3/1T3nhvx98TfFN20LD5ntLLVNJkjO1PNTOD57hwtj6UMPi8gq4HlpwhGvKGEwNdKEVFTm44mhXnK2r56dVSfxJ7HsOlxtltapjMv4sw2ac9WpVlhaU8yzPDNzm5ypUlLA4nCwgruNoVqDglaMovb+iT4Jf8FJ/2Iv2gILMeAf2hvAFvrF4I0Twp431QfD3xYLqTaDZQ6H41XRLnVLiNjsd9EOqWrkFoLmaPDn6O+NPxJHwu+CPxY+Lmm2tprrfDr4XeOfiJY2Ml35Vhq7+EvCup+I7azkvoBKY7W/awSB7qFZSkUpljDkKD/B94+/4JT/tveE9WngsP2ePiW2lNF9psj4oPw40PxBJbs8ihpNE0j4j+Jg6Bo3jSeC7cXEiSBYYmUxjwPxX4S/bK/Z00C98PeKtN/aD+EXg/xJbXnh+/0+a48deGPBniK01W3lsr/RLh
rSe38Oa1b6laTzWt3pzSXUd5bzSQzQyRSMrec+BMnxGIpvLs+pVIOcJSwld0K1WULqUoc9CrSnFuN1Z0Lq+tmrnf/wARH4iweFqrN+FsTRl7KcYY+hTxWHoQqcrjCqoYmhXpTiptSusTyvZXTP8AQ1/Z8+JV/wDGX4EfBj4vappNtoOo/FL4WeAPiJeaJZ3E11aaRP408K6V4jk062uriKGe5gs21IwQzyxRvMiLIygtT/j/APEO++EXwI+NXxX0uCwutT+GXwm+IvxB0211VJ5dLur/AMG+ENY8RWdtqMVpc2V1LYz3OnRw3cdteWlxJA8iQXMEpSVeJ/Y6tJNM/ZG/Zc065tbzT59N/Z2+ClhcWWoWdzpt9ZT2Hw18M2txaXthdx291ZXVtJE8NxbXEMUsEqNG6Kylazf20fCnjH4lfsiftH+A/hnpD+J/Gvjn4NeP/CfhjRrW+0+zfVdS8Q+H73SI7OG91K8s9NheVbqRVa6vLeHd8hkBbB/O1So/2oqMuSGH/tBUpc8rQhR+s8j5pyekI0/ilKWiTbfU/VHWxP8AYrrx9pUxf9lutH2cOarUxH1RTThCKu6kqnwwjHWTSUXon4D8Fv2/PhNafBP4b+Pf2kPi58JPht4h8b/Dnwl491W0vfFmnaPbWOs+KPDmneIdX0DR9G1XWdQ8Qy21jdahNDpFmz6nqIto0tpZp5ViL/I3x8/4OAf2PvhvBe2Hwe0nxv8AH/xHEJEtJtK0648BeBzOmVKXniTxdYxa+I9+Nk2leC9XtpkDNHcBSjv/ACwv+xD8dNP8ay/DnXNFtk8f2XlSav4C8GPcfF3xpotvIAwk1vSPhJb+NbLw3MylDDb+LdX8OGYSRyK627Gdf0I+C3/BKXxVO1pq3jP9lf8Aba+LUWUmGmaPo/wa+DWiTfdLQXcniD4p+IvEtxbH5l8yGTw1dvw22A5Wv018J8LYarLE4rHVcZGcnUp4XD1KWHw84Sd4qE+e8oWaSksXTTV3ft+Sf668bY+EMLgMro5bKlGNDEYzF0q+LxVOtTioTlVpKk3Co5JynTlgKsot2a2v49+0t/wWf/be/aHubvTtE8dn4EeC52eO38LfBqW+8O6rLAx2xf2r48a4l8Z3t0YiYrldM1TQ9Iutxb+xYshV/NzVND+LHjW8l8Q61o/xD8W6hfMZJ9c1TT/EmvXl4zHcZJdTu4bqe4YlixZ53JJyTzX9cnw1+F2t/s0aVHd+Cv8Agkt8VvAFrbmC3n8USa58AtX8UzMw2wxy+Jte+Kl54kvndlYrBJq7IrNJIiL8wPrP/DanhrQGjb4q/Av9qD4P6ZGyLqPiHxF8G9T8W+GdMi3ASz3Gt/CvUvHtmlvEuW86c28bAZ3KnzjvpZ7Ryxewyjh2hCmkrKljMI8TUXedLCrE1Zydrc0qlWT01Zx1OD6mcf7VxBxliZVpNuUq2WZjHB0pJK8aeIx7wOHpxSe0aNGMb3slc+NP2ZvC7/tf+B/gfc/Fr4JeKfi14Wk134Aa78aPBl14V8U3kN5rWlafqn7Bnxr8S38ejwQ3+m+Jovh7qP7L/wAfbfXtJu9P8SWVv4B8d+IrV1sbLWJov51v2vPhEvwD/af+O/waiHhlLb4c/E3xT4ZsYvB+papq3h6DTbHUZf7Mt7K81zVdc1sS22nyW0Go2Ws6vqOraZqcd5pmpXUt7aTtX+jV+zjqv7Pfxm+GPiLxB+zn8dNO8by+IdAv/Dt/8RPh5r2i3fiTwTfavZSJBKvh7U7TVoPCviTSpxHqen6Z408O3dzHeWkR1DT7y0EltJ/np/t9fDL4HfBz9qT4l/Db4CfFrxn8bvC/hXVJ9P8AEvxG8bjSp9R1n4hi9vZPGMFlrOk+XB4nstN1B1sLjxNJaWLaxrcGsXFrDcaaLHUr6OD80qYzNcyoSpV8JClSXJgZQruFFKqrc7lGnToTpqTpxUoKtiFJt8qo2eXiFklDLsmyjE0a+HzCVau/aZmp4aFX
EydBKfs4wnVq4inWlH2snCpKhhXCMU3LENrE/Yp12TQv2g/CTLrmgaDBqsd7o15ca/b+ct9b3gidNG0iUhVs9c1a7gtrOwunuLZQJJoA1y066fe/1Qf8EhNQ8WP/AMFJvGH7TvhS3sZdE/ZD+GGp/C/RJ9Qiv30vWvif8ZrGeDxDb3A0/UNObVrTw14Miv7W70y6nlt9O1y70nURELgW7r/Mf+yN4X8J6d4a+Nfxp8e6Xa6v4c+H/hEabp2mXwP2XUtf1WT7ZbwxurI8d6k9hplhaSI6tFPrUUqMskaOn9sv/BL79nrUP2ef2Pvh5pviq2eL4mfFB7742fFe4uI/KvpfHHxKNvrMllfoy7kvNA0D+wvDVzGWZBdaRcMp/ekn+g/D3IKOe55/tlFVsvwFCeIxVOavTqznelhqMtVfmqSdbl2lChNPRn8VfSD4/wAXwLwPbKMZPBZ/n2NpZflmIoySxGFo0XHFZjjaXMmv3dCEMJzWvTq46jNaq6/fj/h4n8ev+gT8Of8Awntb/wDmoo/4eJ/Hr/oE/Dn/AMJ7W/8A5qK+EMf7I5PqOMdO3uT/ADJ6UuAe3XjPpj9fYg9MYxiv3f8A1L4V/wChHgP/AAUt1a/2vJ27+d2fwj/xGXxT/wCi64g/8K15f3PL8X3Pu7/h4r8eB/zB/hv/AOCHXB+n/CTcUV8E7R/kL/hRS/1N4W/6EWX/APgn/gmX/EafFX/ous//APCqP/ys/pKP7O/wF/6I/wDDjrz/AMUjo3qcj/j0POB/nPB/wzt8Bhn/AIs/8OTyP+ZR0bAzgcn7Hjrnv7V7Keh6ZyP4W59M9/59vWgkeo7Z4P1Hcenrnt1AB/l7+182/wChnmP/AIW4n/5b5L7j/Un/AFT4V/6Jnh//AMM2Xf8AzMf503/BTf8AaPuP24/27v2qPgp4P/aK0P8A4J9f8E6v+CfviTTvhT8Svip8MrCDwv4w+Lvx1vtR1Dw1qGjz6zoE+l61rofxfoni3RNB8OWmoahoVlofg5fEc+gahrGvwzad+S/7Zv7NX7Rfww+APgz9ob9nj9tef/goF+yN8K/iRpnj+e+125h8WeLvhN4ntbWfQftV5qcuqeI9bbwRNbay2meKtO0fxB4et9Lv72yvNf8AB1u9jBr1p6h/wTwmT4g/EL4M+OvGsIvtN+K3/BUj9sLxt4pl1BRJbXvxK8J/szWniv4VjUBJvjm1HTtX8Z/EHWdFaQtJFqZeW3xMN9fa3jXxX49+AX7S/iL9pqT9k6/+EP7FPxB1mf4Fftg2Pi7xV4Om034teHvFHiKL4e6H8d7/AOCWiW96nhmDSdU1Z4ta8T32q3l7418Aam0+p6NZySRahN+bZtxbxBHPKtCOaYqvRVJWwOJxdH6tjKdOvOg8CvbSWKnjcRWw9aphp0qlWCnLD0p4VU/aVz9g4f8ADrgxcPYfErhzKcHifrHN/auAyycMbl2Ir4WliI5mnhabwVLAYTDYuhRxkK1KhVlRji6tLG+19lh3/GfN4u1aHxvL490C6u/DmuxeKpPF2i3mmXcsN9oOrJq51nTrrT7+IQzRXel3ghltLuMRSpNBHMgRwMf2H/8ABHmTSv2nP2q/FHhL/gob8d9O/a+8bfCrQfh1P+zL4c8T6J408WeGdPPxEtNb13xf4vHhjxb8PPDl9p3izTBo/gvSLjxL458PWV94bhub9NP1b7Pe2l8PxZ8A/wDBIX4oftK/tAftu/CD4CeMvh5oXiL9lv4tahomleBviDqWu6VL4t+Hus+JPGdr4U1nw74hstL19JZYtM8P6X56axb2ltImt6bcTawjTbW/pR+GPw8+LX7BfxY/ZX/aQ+POmfGfVPhrpf7JHwU+CP7SkH7N2p+JfF+m+H/i7+zuNb0nwR8QPi74W8F2ieMPH/wsvfA/i7X4rmbRdP1bS9E8RaXHJ4p0S+0vVLWe3jiHNsrxKo4WnWpzxdSmnGMOSGNpqtDC4ynGlKVKVTkxWF9p
SlChUjHEKp9XvKdSPL28J5HneDeIxtShWhgaVZqc5+1nl1aVCeMwFWdaMK8KPtMFjfY1ozxNKUsK6P1u0KdGfP8AvR4+8GWeg6tr1rpGnaR4Ynms510XVLHRdPCWFtJbPHp12kBijiul09sNJBM/lzTQSpM2HZj+fUvwZ/ab1S6+Bg12Xw18Nh+0Tb+LNQ+GWo/Hj9on4teFtR1vSfCGkwazd65rmn+D9Z8O6B4Ni160vtMn0DR30y3u7uXV7C1srIQCSaP9A/AX7Yv7Fv7U/hpNT+F3x9+GXjxUhMyQ+HfFWkXPiLSHlUA2uq6IJ21PSJ2O1brS9ZsrWePA8+GF0DrwPx70j4zfHSx0K1179oL4XeItG+H1/wCItS8E6j4g8BR3/jbTNC8QadYx6r4P1PWdA8Z6Jpuv6K0+kaTd291e+Hz4labSNNF3rl2Rfm/4ckzDKcJKtSzCcMPKEYOnFqcHHkupxlTpR9pFtOLUZxUVaWie/scT4DPcwp4OrlUJ4lOVRVWpUpqpGfs3SqQqV5OjOKkpJypycm5x1cb2/jA+LP7af7UHjj/gof4h/Zf+CT+JfGnxFuPi9Z/s6fDG38AfHODV/AvjfxzpWoxeFWstPk/aDg+IHgrU9K1vxZNeR6VqNvrPgZr60u9OnbWrGW4Wev3z+DPw18Z/EvwRaeGf2mvg/wCIvBfiay02bw18ZfhX8WvBKaFeN4k0yWbSdYsL/wAL30uq6bc+HvENzaSa/ot3YX+s6JqnhjUNM1DS9UvbS+tLuT5h+B//AAT7/aD/AGdfjv8AD79orwr8UP2QtS8Q/C/4k638Y9C0DxZ+zR8QV8Hf8LD1S31JYfFOrx6F+0JoOsXd7ol9fxa/pksGuaZGus6Ro81/Fe2Onx2LfYH7Zf8AwUYvvGel694P8NfFHwj+0L+3J428HzeCfAXgb4NabY3KaV4tvbabT7DxJrmg+Hr/AMR2vwy+HXgy/wBRm8RX974+8SyT2+i2EkF54h1vVHe8uPK4jxeQ5rRoR4fm62YyxMPaV6VDE04UcO4y9pWxNevTpqnTptwmp83LTSlN8sU5L2uD6XFWR1cUuKIxoZOsFOnQw9fE4OrOtiuen7KhgsNhatR1ataPtKcqfI5VZShCLlNqL+Mv2Gf2PPg18Z/gDaeKvitP8Svil4Oi+Jfxu8NfCfwF4y+LnxIvvhj4P+FXgn4yeOfBngTR/D/gK08SWPh26sIvD2g2iRXfiKz169e3EMdveRadHZ2sH0x4/wD+CeP7L+jeDPGGs/CD4WT/AAr+IeneEtfuPB2u/Bvxx8QvhXq1r4ktdKu59CmU+AfFWgW9+66mlsWg1K2vbe5GYriGWNmU/UP7Pfwl0f8AZ7+Bfwp+DWm3cdxZfDXwN4e8LTao+Y/7X1PT9PhXWdcm8zG2fXdZa/1e4BwfPvpOB0r2cMkihkYMrAEMvII6ggjgjvnOPzFfDYvOMbLHYmtQxuLWHliqs6NNV6sKbpOq5QjKlGSh70OX2i5febbldt3/AEDA5Bl8MuwdHFZdgpYuOCoU69Z4ejUqxrqhGNSUa04yneFTmVJqXuJRUbJI8K/4JW/Bf4Y6f+wl+zz8TfAXhnTP+Eo+Ivw08HeN/iLr0kR1LxL4l+IGv+G9Lv8Axr4h1rWbw3Go6l4i1DxJcapcaxc3E8k8s0nkoy21rb28fK/tS/FP9ob4R6nol3418E/EPwL4Z+Ii65qfww0vU9Y0nwLe6r4e0O6trKXVtU0LRLO4+JVily1zb3Fq2s+IPC73cFwZLbSgkTSJ57+xH+1P8N/2AvFHj/8AY5/aR8VQ/B7wpYfFXxd44/ZT+InjMPo3w58X/C/x7rU/jWw8I6d46vgnh+Lxb8Otd1rVfCF74Z1G9sdUGm6ZoF5pVlfWF6JIP02/aD1nxT+2VqHgnxP/AMLx/Z61TRfB1v4ht/B3iKDwHc6h4lvdB8Ww6a2q6TrupWXxMsPDWsWbXOmWVxZ3WkeG9Cni
SFoy7RXupx6h+lZLjcppVK9bM6ijCvL2uCq1YTlGtSqTlNTU6cZSlU5HTTt/DkqlKclOEoL8k4hwuf1qGEw2UU254WKoY+jQnThPD1aVKnT9jKFSUYRoxnGry8y/ewdGvCLpzjN/gz+1B8O/2q/jRa/sQeAPhN8QP2eNc8W/thfEHxvZ+AvAN5+2D+094S+IcWr+A/A/ibxAtn4ng8J/EXz/AAtHcNpuo+H4b7WTeeH18Xap4U02/SEakl3adL+yN4E+POl6J4l+GPxs0D4i+GvjH4X+K3i7wJ4g+FvxJ1w+M/Evw51HQLqCyfRoviJcWtvqXj3wnq9ssPj7wt4u1WTUml8LeKNOSz1/XdEtdO1e7+gL79kz9oj4UftUeBfj18F/j7+z7oupfBnS9Vi+Hel61+zT4h8WeHdH8V+IvDOvaHdeMzZ6d8ffDNpq+s6Hb+LtWm8Pxaml7olpqyWeoajpOrXGmaY1l9en4w+FPhdP/wALI/au/aG8LXnjvUdDiXxv8aPixrfgz4ev4s1qzsLOG7votPWTQ/Dvh3TnitRBpHhjw7awabpNhFp+k2cUot4pJODivOsixeGhh8olLF494lQlGnSxXLGnCM+ZJVoKLcqjgougnNqMlzKEmp+3wBk/E+WY2ri89jDBZUsHKVN1q2BTqV6tSi4Sk8NUc1CFJVW44mXsoylCSpurGM6f5Ef8FlPhN4E+EXgn4AeJfgn8SW/ZS/ae+IXxH8MfCPVvjt4NuvHPwzsvFHwv1m3+xeNk+L3jv4b6WI7vStD1qTwv4rtE8QXN34jFlpmsf8IzY3tpBrK2v8kf7ZXwP0P4DfFfT/DmjfG6T9oSbxT4PsvHviD4lHwd4v8ACFnqniXXfEPiaz1ZNKk8byNrvjDTHk0iPUrbx4yQ2XihtSkvLJJIUFxP/bh421TSv+CkX7TX7LPiX9nzVfjRa/AT9mn4gal8XPH3x9tovEPhL4PePPEHhuwmtvAPgb4X+G/HVhJ4Y+KfiCbxLfXM/iL4iWvgq8s/Cvg221TTfDvjFNQ8SWwT8hP+C4//AATr/aN+I3x4/aR/bevL/wCHPhP9nn4b/DD4ctpuseJfFc48ReJ5dJ8O6NoMvh3w/wCHNG0nVrmLV73xxqD6PZf8JBLoOn3EmoWk8N9LG8nl78JZtHA1cDlmOxHsKs8NiKleOIlSVVV6uLhRweBcJUHiFNwl7WlQWIjKEZSc6PJKHLw8d5FPNKGZ53luFWJoU8ZhqWGqYSFZ0HhqWAliMxzNThiVhHSVSH1evinhXGpOnCMMQ5xnzfjt+w/8Bfjx+2mLX9lL4F6fp/h7SYfE7/Fj4wfFfxC91H4b8H6Hp66dYaRNrE1rDMfstvPp0M+kaLDHPqniLxA0SW6WOm6Xqeop+x2tfDnxT+zlD4w+Nn7Fv/BXjX/2rP2mv2bNB1D4u/FL4H+O9Xm8deBfiF4D8ESQTfEmM+GtZ8VeL/DPiW28O2Est7q3hy9l8RXsNjHMLfUNC1uHT7xeP/Zo+G3xg8G/sP8AwT/Y7+Angi18RfGL9sDTLn9r39pyTQfG9t8N/ED/ALJbatpHhjwF4An+I+o6TeN4Z1D4oWJktdMEFlqMen6ZeeLjbW0y6xeXi/pL8TPDHh7WvD//AATj0XR/2f7v9m/xFP8AtGeK/g3cfBq9tvDgvvD/AMMNf+CHxp8L/GjSLa88K3F5pPiDwfrfhzSLfW012OXy/ENnHo+t30UNzcBR6mN4tzbCZnF4HMa2CwsMRXhTw+Dr4aM60cFTrTeJx0ZOWKdHEzoYijQ9hGhGlQviPrEpzhTl85g+AOHMzydxzjJMJm2MqYXDuriczweJrU8NLM6uFgsFlkuWOCVfCU6+ExGL+sSxMsRirYT6rGFKdSH9kH/BPDxz+zH+33+xV+zr+114a+B3wz0WH40/D2y1zXfD1v4Z0i7g8MeOdIvb3wt8Q/C9tdSWnm3Vp4c8daF4i0azu51invLKyt7yWKJp
yi/Z/wDwzt8Bf+iP/DnH/Yo6NyeOn+h8/gfXrjj+df8A4NAvFGoa7/wR60bRr2d5bbwJ+0n8c/CukB2zHFp11N4W8aSR2+SdsTat4v1ScgbQZZpW2ksWP9RuenIPXjnPHOAOxA9e+Bxmv1H+181/6GmYf+FuJ8v+nvkvuR+I/wCqfCv/AETPD/8A4Zsu/wDmY8Y/4Z2+Af8A0R74cf8AhIaL/wDIdFe0joP6dPwoo/tbNP8AoZZh/wCFmJ/+WeS+4P8AVLhX/omeH/8Awy5b/wDM3kvuPxI/4eL/AB2/6AXww5z/AMy/4j7ck/8AI4c4/HocUv8Aw8X+O5/5gfww64/5F/xIeR9PF5/yMjjmvgnOMcgY3Hp39ME4zg9OxBGetL0H44+73Gc4AP4n8Rz0H9Rf6l8K6f8ACFgOv/Lt/wCfnru/Q/y8/wCIz+Kf/Rc5/wD+FUf/AJX5fn3Z+Cn/AASd8GeC4PiL/wAFC/2YfiF4X0PUfEv7OX7a0/7Q/wAMrW/tSZdFm8UReJPBeneMvDCTSPcW5tPDGkaeizeZL5Nj4otUmMjXKNXrP/BQ34xax8Qr/wAe/sfeGPEnhv4d/D6y+C938Q/2tfjN4l0W08Rx+Bfhd4kGr6fo3gnwlo99usD438YW2ia7qk2t6hHIfC3h/T01LRYbnxDd6cbX5z/4KDWOu/sMfty+Bv27NA1DxP4a+CX7S/gnUP2Zf2ofEXgiONvEXgu51vRI9F0X4jaQk9pfWf8Ab2jaVpXh3xX4ZSbT7uK4134X3GnXY3eI44pviLx7qHxy8a/FX9sX4CePdK/4WV8XvE37PPwD+I2neNfD1hJL4M/aN8Ifs7+M7fWPC3jHwxNp6XWnjRP2gPhbrWhq0cEn9mx+OpvEfh5fIla208fwxx9wXWyLj/N5VuWOEjUp43Lo1PejTo1MRhYfWoxqr2NSOFoVXipxcnGGIlSjUhOKqJf7LeCPifhOOvB7hPE0Oatj6+Enlue+xfLUq5jhMFjKlTL6s6EliaU8djaH1KjJRhUqYKNWpSlSc6Epe9/8EqvGnjnwH/wU4+Elx8TNK1nw9r/7Yf7BHh4XNp4gsrnS73xDrvwusbPQ9I8aS2V4kVyl54x8KfAHUfGii6iiu5bfxhJd3MEU10QP6+HdUVpJGVERS7s5wqqoyzMSAFCjcSTjjr7fyn/tkfGzwBe/tGf8EiP+Ch3wu1e3u/h7qXxVT4YeK9XjMcM+jeHfGN9o1nqPh7WIUP8AxLNb8NaTqvxR07W9HudktpqNpdWkgTy5Gb+qm9txd2d1bNytxbTQn/trGyA9BggnPt/L8s4mcsQ8szCVH6vLE4OphquHad6FfLsZiMLKg+ZJ/uqKw8VdKVrXSvY/f+CowwtPOcrjiFi44TMaWLoYpWaxOGzbL8JjYYhcratWxEsXO8W05OSTdmfAvxz/AGU/2VPj9rtxrPxH/Z5+EfirUy7keKb3wNodp4yuZCSzXb+L9OsrLxOsjOPMiK6qrQ4WRdsxZj89j/gmd+x/Cpj0/wAH/EzRrds/6Dof7Sf7Suj6cuQeItPsPi5DZ26bflEcEMcYHAT1+98FcqeCpKkYOQVyCMHnP9R36Uc9iO3b/wCv0P8Ak14lPMswpQVOlj8ZTpx2hTxVeEF6RjNRWy6HvVspyrEVJVa+WZfWqzbcqlXBYapUk7rVznTcnv1bbt9/wVZ/8Exv2H4pUm1L4Jjxg6MHKfEX4ifFj4lW0jg5JmsfiB468SWMu4jLRvbGM9CmMCvR/iBqn7Ov7CnwM8d/ETQ/Anw6+FnhTwzos91beHPA/hfw94Obxh4lEEqeGvCel2GhafZNrHiXxNqzQaTpNsIrm7nu7wyN+7WeVPq78R7/AOc1+X3hPw5oH7QH7ev7Q/jD4xz2us+H/wBi65+E3g/4HeBtdmT/AIRfwn4n8efD3TviL4n+M15pFyRp934vv59Ws/DnhLXrtHOh2GgXb6cItSEV3bdeHr4nHOrL
MMZjMRhMHSjia9KWIq1Z1Y+3oUIUaSqTlGEqtavTi6jT9lBzqqFSUI058GJw2Ey1UIZVl2X4XHY+s8Hh60MLQowoy+r4jE1K9Z0qanUjRoYarONFNe3qqnRc6UJyrU+p/ZT+DXx6sf2fvhxpvxm1rVr34h3unal4z8aN4t1m9v7ux8VfEHXdT8ca1oUSNJqUttbeGrvxA/h60tR5dtaWmmQWtqBDCij618GaJ458Lagmn3EFve6DOx86SO8jaKzYgkT2qTGK6XcwCywrblJc7hscF69hR1dVaN1dCAVZfmDKRlSCDggjBBHB7deFZggJdgFAJJIwABySSSQAB1JrjxGNqYmpWnOFGKrVJ1HCEOWEJTk5PkSfuqLb5UnZbWtoexhqEMLhqGGg5yjh6FKhCU5c1ScaNOMIynJ6ym0ryk95Nt7nlj638IfjBffET4W3sngv4g3fgLUNI0b4leBdYsdO1+PQr3XtEtPEeh23iHQdWtri3aPVNEv4L7T55LeW1uE+0RRStPa3cMHzfrX/AATW/Yb1m9n1GH9njwh4TvLhzJNP8NNR8VfCYlz1ZU+F/iHwhHGTxzGiYAHYDHBfHCC1+Ef7cP7Kfxv8J3MMFt+0HfeI/wBlX4x6dYyRvB4nitvB/ib4mfCLxFcWsTCKTWvBuveGNc0d9WlWS8Tw54kl04Sx2sMULfpDz3I/Ij9c1vOpicBDDVMFjMVRp4vDxrWp1qlGUasJzoV4T9lKCdq1Gc6b1fsZ0uZ8/Ml50KWEzOpjKOY4DBYitgMXPDp1sPSrwlRqU6WKw1SHtozcW6FenCqr2+sU63KlDlPgiP8A4Jn/ALIioIX8L/Fe4tMH/QLj9p39p6axdSQ2yS0f4wGGSPccmORWRv41YbgfQfAX7B/7HPwz1aHxD4T/AGcvhUniO2miubbxL4k8NW3jjxNBcRMjxzxeIvG58Q63FOrDJmjv1kOfmYhmz9a8/wB4H8Pp6Ee35+hxTufb8v8A6/1/yOcp5nmU4uE8wx04S0lGWLryjL1i6jT87rU2hk+UUpRnTyrLqc4tOM4YLDRnF6JOMlTUk+1me+aPeQX+mWdzbJHFEYEQQxgJHA8QEbwIigBUiZNiKFACBSowQK/Df/g4f8ZXuk/sD6b8ONJ3S6z8cvj38LPhzZ2EbkS3iWcms+PFG1cs8Q1Twdo8DgBsTXMHynIr9pPAcMqaTPM7MI7i8cwp1VVjRI2dRjgvIGVucfuwcDJr8Ef+Cu923xb/AG+f+CVX7MNs32i2i+KOufHTxhpYy63WjeFdU0C/06SaHkCEaP4H+IVvJIynMU8+xk8t8+hw1Ff21g60knDBe3zCbeiUcBh6uLTb6LmoxXq1qtzh4xnL/VvH0IO1XMFhcqpJWvKWZ4vD4FqK2vyV5y6aJ+aPjTwL8Vfip+yr8ff2p/jdayW2oeF/gn4p+GvwR+Mf7Pev+Fl07xno/wCyN8JfBuh+DvhH8Yfhjr8gS7i/4lkXifx8uhwyXXhPxdo8+pi4msvEFna3EP7m/tGeIfhr4K+BPxJ/an1vTdD1bVfgR8EPit47+Fviy5jSW50fXfFvw/vtAsD4euWIWC78XR6laaAkigvcRagIVKiVgfwm/wCCi/xq8P8AiPxl+2l8ZfAdrLrfw/8AAv7Mej/sU654v0mymvtE8UfFzx/8Rb2618W1/ZpJbXth8INH8Rrp+p6jJIkUXiXWbjw/bzTXE1vFJ538ZfE/7Sf7RPh/4Lf8E538Ta/p/j/9qzW/hf8AEL4kfC1re0+w/sn/ALJ/w00e2f4YeFvFvk24vW+JHjDRrOD41fFO21i9hmg1Wy8DeGrHS9PfWFtrr7LCcPYziTH5CsPRVPGYqvhMFVpQTjXq0fq2A5qDjCK5pQo1qi9rV5fcr0cHiKkqvs1L86zXi7K+Ccm4mxOYYn/hLy3AY/Nliqz5sJhqlLGZpKGN560pckKlfD0pujh3Je0wuIx+EowourKH9An/AAb1fHb4
t/spf8EvPg14P0HQfBawePvEvxG+Lk//AAkGj61c6m6+LfFV3YaRcyTWfiHTYGgvPDGgaDe2hW1X/RbiIl3JzX7bf8PF/jvj/kBfDDGB/wAy/wCJOh/7nD6/XHHUZ/ODwL4M8P8Aw48E+EPh94Rsk0vwr4I8M6H4R8OadGAVsdD8OaZa6RpVqCMb/IsbOFGcgF2UswyxFdX83rk4OOwJ4z9e5HGO444H+hOC4F4boYPCUMRlGCxFelhqFKviJU5OVetTpQjWrSbau6k1Kb0VrvTZP/DXOvHLxIx+c5rjsv4tzvL8BjMyxuJwOApYiMaeCwdfE1KuFwkI8jtHD0JU6KV3pDVu7b+9P+HjPx1/6AXwy/8ABB4j/wDmvor4EyP72PxX/Ciuj/UvhX/oRYD/AMFP/wCSPG/4jV4q/wDRdZ9/4Uw/+VH9Iv8Awzh8A+f+LQfD3Ax/zLGm5wcj/nh64/XjNB/Zw+AYz/xaD4e/j4Y03vjr/o/B9uvQ17VuXB5B5z6dT+uOvH8+aCRjt2ByPfv9OTj/ACf5f/tjNv8AoaZj/wCFuJ/+Wn+o/wDqjwp/0THD3/hly3/5mPjT4/8A/BP39kD9pb4OfET4FfFX4H+B9S8CfErw5e+G9bTTtFsdM1nT1ukDWWueHtXt7Y3GjeJNA1CO11rw9rFtmfTNYsbO8jDmHY3+e7+05+x94r/4JMfF3wb+y1/wUTsfid4w/Y1tfEWtWv7C3/BST4NX2v8Ahb4lfBXTdflmvL74c+I9b8Mtd3VvpQt3uJ/FPwr1mDWYtOlTUvEvgXSvEnhyeOPSf9O3cBnp1HUEcdfTr1I9+a80+MHwa+E37QPw68T/AAj+N/w68HfFX4Y+MbI6f4m8D+O9A0/xH4d1e3LLLEbjTdSgnhW6tJ0ju9O1CBYr/TL6GC/0+5try3gnTyc1pSzmm4ZhiMTWmo2pYiVepLE4dpSSlRrTcpQspzThrTnCdSnUhOnUnCX0nD7w/C9XnyTAZfgaMp89fB4fBUKGCxbbptrE4ehClCo26NFqouWtTnSo1KVSnVpUpw/zDP8Agof+wP8AsVfDn9gLx18d/gX8SrL4heN4/GHhf4h+Gfi/rfx7XxnqPxBvvEPivTbPxLZadaWOuad4SvNbu9K1rUNbki0rwlB4gkvNMZp5DKs7j+pH9lP4uw/H39mj4CfGiOdLib4mfCTwH4v1MoVIg13V/DmnT+IrF9p2ibTdeOo6fcKuVW4tZFUkAE9/4t/4NKv+CPWpXvjnXvC3wr+Jnh/VPEHh/wAUWnhTw5L8afH1/wCA/BfiPWdHvbPQ9esNPn1B/E9+nhvVbm21iy0vWfFeq6ZPLaR2l/Z3unl7R/x//wCCBfxA1u9/Yw8Qfs/eNo5dO+Iv7Jfxu+JvwY8UaFduTqGmRHX7jxZbLcg4cQ22t694m8OWwkAaMeGpoAiQwxg/lHGGQ4jA5FQrVsfXzOphszqT9viI8tSjhsbSp03SXv1E4RrYeja3JHmqvlhFWR+7eH3E+FzLibFUMPlmHyenjcmpQeGw1Tnp4jGZdiKlVV3elSftJ4fF4jm5vaT5aMXKpN3Z+ouu232PWdTgwAq3ckqAjok489AOnASRQMdgaysj1Hr+H5/Xmuz8dWxh1eG5Awl3aR5bBIM0LNGwzxyI/J68464rjMD2568dfw9e/OelflZ+0SVm12fn5P5/8Ou4cd8dfy6n8Djr/wDqr82P2yf+CfVv+0brmreO/h/440jwB4v8XaB4b8JfFXwx4v8ACtx42+Evxr8M+DNaXxB4LtfiD4X07XvC+rw6/wCDtVDv4f8AF2h67a6pDpktz4fvob7SJxBB9pfFT42fDD4J6Pb698UvFun+DtGu76DTLTUdVju0tLvUrqOea3sLeaK3kilvJo7W5dLZHaUrE7bcDnx6T9tj4GuEOly/ELxF5wbyG8N/Cb4la/HOF27vJfSfC155wG9c+UH+8v8AeXPr5XQzqFSOLyrCY2rJ
NxVShhKmIpy5XGTjJeyqU58s4wmk03CpGFSPLOMJLw84q5FUpSwWc4zAUYSSm4YnHUsJUjdSipwk61KpDnhOpTk4ySqUpVKU+elOcH8afDzwx/wUK/Ys0C2+FPgf4e+Dv2w/g34fjNh8Ob+5+IQ+HPxR8I6Cjs1l4Z1xfEttquh+I9M0WF00zRtRj1fTL/8As61jW9tJJVRpMr4pfBz/AIKC/t3eGtR8BfFNfBf7GfwavoC2reF9E8QH4s/ELx7e2xE+n6X4pvvD114W0nS/AT3yWk+u6DpOuHWfEFpbzaO+taVbXstwn3DH+1ha6i23w78Bf2qvE7MoaL+xv2X/AI+XKyIchXEx+GwgVGIIV2mCk9DWrB8dPjDqBxo/7EX7ZF4pG5JLr4GeMNCSQcYKnxJpmigZyOGZTjkgKN1fRU8v4olVWKpcLzjj+f2n155diFV9rpL2yoVan1GNW/ve0WFTU/fjy1PePmquZ8HwovBVuMcPLLuT2Ty1ZzgHRVFLlVB4mjTWZyope77OWPcZQ/dS5qXuHzd+yb/wTi039nvV/Anif4i/Gnxd8dNZ+E9r4jtvg9oWo6VF4S+HHwrfxcLuLxBqnhjwkNY8TarfeIbrT9QvdDstb8UeLNem0Xw/dNo+iQadaR2ywfpj36g8f4d89OnFeH+G9Z/bJ8b3U1n4L/4J5ftQavcwwrPKL+1+FXhiGOJiFUyXPiv4o6FEjFjtCtgkghQdpx4uPj7+0l/w0Uv7O2pfsw23hPxB4Y/szUfjHqet/GL4deKIvhLo97IXTSvEi/CfVfiJotv8QtTs1afRvAF54msfEYhlttU1ew0zQ5v7THBmeRcUVPa47NcHVoxpQcqlbEywuEp04OTqNRhzUoJ1KlSUlCnDmq1pyajOrU97uyniHg+j7HLcmzHD4iVapGFKhg5YrMK1WahClFyqRjiKko0qVOFN1KtTkoUKUYuUKNJKP2x/3zyOvXPT6cduvpSEntg9sDrnIGO/v+lA6D8O3Tp/j+H4GtjQbP7drOn25GY/PE0oxkGO3HnMDx0YJs78sB7V8qfZLVpd/wDgHtGkWf2DTLG0AAaG3jDgd5mUvMfbMruc+/pX8rvxK8J/Br9u3/guZ8cfBnxiu9I1f4f/ALOvwE0r4d+FNCu/Hl94I1HW/GOn/wBh6hq1vol1o3iHw5rssvh3XPiB8QXv10y+Y282mW815F5Uq7f6kPGPizQ/AfhDxV458TXi6d4b8GeG9c8V+INQkICWOh+HdLutY1a8csVAS1sLO4mYsygCM5IHNfih/wAERf8Aghl+yX/wVk/Y/wDjD+3N+3Z4J8e3fxH/AGk/2tPjF41+GvinwR8QvEXg/UtP+HthqUNhrVtFDC934c1a21H4nv480+abUvD97d2kfhyyi0y9s0ku4H++4FymrmM84qU8RUwco4FYOliqcW50auLqKTqU7TpvnjSoTi+WpGUVVTUk7M/NPE3PKGU0uH6NXC0sfCeZvH18FVko08RQwFJxjSquVOtFQnWxVOa5qU1J0LcskpJfnN8W/B37Jf7NXxD+Hv7N37FWk/FP9vn9sm+8VPF+zv8As0TfEe/+MfwJ+Afju+u7q+h8b674W0WOy8M614o8Nz3F/rNpoviW+15tBMF34n8Y6r4csrVNUn/sC/4I0/8ABEDwt+xL8N/GHxk/bA/4R/8AaF/bz/aTu08WfHn4jeIoYPFFj4NF/ONUT4YeCdQu4ik1hYX8v2rxZr9lFaW3ifW7ayis4E8OeHPDMMH35+wX/wAEm/2Cf+Ca2lajb/sn/AjRPB3irXrIWHib4peIrzUfHHxX8SWQdJZNPv8Ax34lnv8AVrDRJZ4obmXw14cOh+GHuoYbz+xhcokw/Rvcvt0x0J/DoOOlfr+TZc8mSq08XisRj955hVqSjiW7ttUnBr2FNybk4U3ecm51Z1J+8fgHEmaU+JVPC4rLsBRylpRhk8aEKuC5YqMYvEQrRksXUjGEIRnW
i404QjToU6NNKC8V/wCGcPgH1/4VB8Psf9ixpuf/AER9O3GfcGj/AIZw+Af/AESD4e/+Evpv4ceR9OPcHPIz7Xx3Azg9vrkeueuR165xmgFfUHHPA+pPr26+/XrivpP7Yzb/AKGmY/8Ahbif/lp8R/qjwp/0THD3/hly3/5mPFP+GbvgGef+FPfDw57/APCM6Zz7/wDHsaK9qynfH5f/AFqKP7Yzb/oaZj/4W4n/AOWh/qjwn/0THD3/AIZct/8AmbyX3H4tH/go98Zz/wAyl8Mzn/qG+J//AJqOnAz6c54Bpf8Ah498Zuf+KT+GfvnTfE+OPY+KMZ6ZA5/Kvz67cEYwc9O/0GR06flnFJj0K9T24zyT1HQdO/Y8V/T3+pHCn/QjwX3Vf/lnl/V2f5hf8Rt8V/8AouM66fawvlf/AJh/61t0t+gx/wCCj3xm7+E/hnyMn/iW+JyOP+5owenXrwKD/wAFHvjPjJ8JfDToD/yDfE59eCf+Eox1/XNfnz+IOBknGeMDH8PGP880YGOo6YJA+pzjaegznkE4GcUf6kcKf9CPB/dU8v8Ap55f1di/4jb4r/8ARcZzuvtYXbS9/wDZ99/6sfoN/wAPHvjNz/xSfwz9/wDiW+Jz/wC7R7cevHqK/km/aj1P9oz/AIJt/tv/AB+/4KJ/Bv4bWnxP/ZS/at8UL44/ay+Dng6K6tNU+H3i+5vL3VNb+IWhLcSajc21rLr+teJfFNprcz3WjW0/ifxF4b8UQ6RYv4d8SW/7p9uSM8HOPqeTt7+h5P8AOOaKG4ilgnihmt545IZoZo1limhkVkkiljdWjkjkQsrRupV1JVgQSD4+f+GPCOe5VisrnllHCLEwSVehz89OcbShJxdS04qSjJq8JJpSp1KdWMakfq+CvpKeK3B3EuW5/LiPF51TwVVurl2YSoqjiKM0oVYwqU8OpUKzg5KnVtUgm3CtRxGHnVoTwf2a/wBqT9n39tr4U2fxL+CPjOw8YeHLnyYNZ0qQpY+LfBOtvFvk0LxboEkj3/h/WrciTyxKJbHU7dRf6PfappNxb3k/Xa74eu9Ek3EtPYucQ3Srjbn7sU6gHy5AOh+5JjK4O5F/C39of/gl/wDEL4RfEm7/AGsv+CXfjg/AL4327vfeJ/g3bXUNh8JPilaif7Zd6NBpV2p0DRm1OUMG8Ma1ay+Brm6a1uLA+C7u1OrSfTX7Fv8AwV/+Hvxr8Tf8M2fte+EW/ZS/a30q4h0DVvAXjuK50TwR461V1SKKTwTrOuMP7PvdXcpNpnhbXbuSfUI7uyXwnr3jBJWuIv4U8Q/CHiLgbE1KvsKmPyiUpOhjaEZVFGCu7VOWKu4xTcvdhUglKVSlGny1J/7F+CX0luA/GbLqMMLjaOVcSUqVP69k2MqU6FeFR8sXKEJTadOpUaVKpCpVw9SUo06Vd1/aYel94eMfBXhP4h+GtV8H+N/D+l+KPDGt25tdU0XWbOK7sbuLcHjZo5FJjnt5UjuLS7gaK6s7mKG6tJoLiKORdv4f/tIftsfs3+GJvhp4P8X+Hf2rfg5Fb2y+Hvht+0D4r1Tw/wDGTwBp1ncRNbab4L+P1povi278V+H7ZFe0s9L+KfhPXNbs7OOGytviBDbQpbV6B4i8Jy6bvvdODz2GS0kQy81oO5PVpYB/f5dB/rMgGQ+WeIfDtp4htVimeW2uoGMtjqFsxju7KbAG+GRSrFHAAliLBZFA5V1SRPz3Jc/zTIq6q5fipUYucZ1KUoqth5yja0p0Z+65KyXPBwqW0U+W8X+1Z/wtkvElD6vm+DjWahKFPEQk6WKpRle6p14WlyO7bpz5qd3dw5kmu0uf29v2xrxQuk/sYfCTSZHQgTeJ/wBrPUJoonzhWeHw/wDs737zIpw5RJoiy5USIxBrm7r9rz/goNeIzJ4J/Yv8Eo3Ky3/iT40/EFoFI4EsMWk/DGKVoz1KXcauCcFMAn4h+KngT9r25k1CL4f694NfSBeWlnpZ
m1XV9R8TX9pOkf2jU7+bX5NJ8N+EI7OXzlljt9K+JdxLAkUtvp9xNPJb2uv4D+DPx6LRyfErxD8O2hTS7W1Wy0BNf1PWH1KERJcarqPimey8OaXqJu1WR5LHTvAPhyCO4lMlu8NvGlpX3NXxD4olRVV5zlVG/wDy7oYSnKunaLS5Zxna6eju0mnGTUk4nwGH8J+B4Yh0Hk+cVlHX22IxVaOHkk2m1ONSi2rx1jyqbTjKEZQlzH03L+0J+3z4tttW8H3v7ZngL4caR4stZdM13Tv2a/gmfDfi3+zJ1CXw0Lx947+I3xSvfCerG0823g8U6Xo1lrWlGT7Xpl3Y3kcEsT/h58N/Bnws8N2/hbwRosWjaVFPc390xluL3U9Z1e/kNxqmv+INZvpbnVfEHiLV7pnu9W13WLy91XUbp3nu7qV2Bqx4R8Gaf4UtWETG61CdR9qvpECu4BDCGFPmMMAb5igdmdsNIzFYwnf6fp15qlytrZRGSQ4LuciKFMjMkr4IVfzZj8qKzECviM44hzfO5R/tHMK2LhB80IyhSoU+a1ub2VCnTg2lpGU1KaTaTSbR99kfCuQcOqaybLaODlVSjOopVq1aUb35Pa4ipWqRi5ayhCUYSkk3FtJlSKOWeRIYY3mmlYLHEilndjjhVAJ9z6YOTgV614X8MPpO69vHDX00Jj8pCpjt42KsyFgCHlOxQ7LlFGVXcDuPnPxN+KnwV/Za+HurfFH42/EDw38PvCekxFdQ8T+JrwWwuLgo8kWk6Fp8azalrOq3gjYWGh6JaahrOpSKUtrS4ddq/wA//jz9ub9t7/gqnresfCf/AIJ9+Gtf/Zw/ZaW+uND8d/tbeNre40jxXr9hGzQ6laeCDaSedo808TMIdG8J3V74wJfT5de8T+AbO9u7UdfDHB+fcXY6ngclwNbESnNRlWUJexpq65m52s3BNSkk/cj79R06d5ryuOvEXg/w1yfEZ3xbnGFy7D4en7RUqtaEa1Rtfu4wpt35qjTjSVnKtO9PDwrVnGnL2z/gr7+3novi7wl4o/4JvfspLe/GT9qj49GD4c+K9M8Cypf6Z8L/AApfXtvJ4xsvFerwb9Pg1rV9Bgv9E1bSZLiKHwp4dvtY8ReLr3RYrLT7TV/3w/Yg+Pnjv9in9kb9nr9lTwX4V+Gk+ifBD4X+G/BUmonTfEZk17X7a1N74w8TXHl+IbWM3Xinxde654iu/LtbdDdapLsgiXCL+Xv7FP8AwT9+Av7DvhS50/4b6ZP4h+IPiK3iHjz4veKkhvfHHi+4Mi3E8BugjR6F4e+2gXFt4d0kpa744bjVZ9X1VJNUl+5MZ544Hpx+Hy8gHr6Z5I5z/enhx4NZPwlk8sPnFOlmuYYucK+IdS7pUKiik403CS5pNKKlZypwUVCm6jdStW/xr8evpb8VeI3FFLEcFYvGcMZDlkKuGwlSioRxmZU5TjKNWvCrTqOjRg/aOjGSjiKjqzrYiNFexweE/QX/AIePfGbv4S+GnIyf+Jb4mPrkf8jR25z+PvS/8PHfjOOvhL4Zjjp/ZviboAf+po6Dmvz5xjuM9OnB7novI4/DHJ5xR+RHuPx4G0kDHJ+vWv0T/UjhT/oR4L7qn/yzy/Puz8K/4jb4rf8ARcZ1/wCBYby/6htu783t0/QX/h498ZxjPhL4aDj/AKBvifj8P+Ep7Y544z70D/go98Z8D/ik/hoOP+gb4n/ED/iqO2OeK/PrHrgkkdsc9Sfu/wD1uec90x2yO2OB+J+7yPQ9Pej/AFI4U/6EeC+6p/8ALPL8+7D/AIjb4r/9FxnT2+1hvLX/AHfbe/r6H6C/8PIPjN/0KXw1/wDBb4m/+amivz3oo/1I4U/6EeC+6p/8s8vz7sy/4jf4sf8ARc5z/wCBYb/5n8v6uz+j/wD4Zs+APP8AxaLwD25/4R6w6dz9w9Pf9c0f8M1/ALoPhF4A7Y/4p6x9v9j68fTrzXt/GCcKefXj69P89RnPJkeg5I78
nPrgfpyDzziv5i/tjN/+hrmX/hdiv/lvkvuP9QP9TuEf+iV4c/8ADHln/wAyniH/AAzX8Au3wi8Adv8AmXrH/wCI/H8hzR/wzZ8Af+iReAO3/MvWP4/wdR/nrx7fnOeF6jOT1HY9Ppj/ABqteXlnp9nd6hf3NtZWNjbzXl9e3dxFbWlnaW0bTXN1dXMzJDb28EUbyzzyukccSNJI6qpIP7Yzf/oa5l/4XYr/AOW+S+4P9TuEf+iV4c/8MeWf/Mp4x/wzZ8Av+iReAP8AwnrH/wCI/H9AT1pf+Ga/gF/0SHwD25Ph6x9Mn+Dr6fhyeo/lC/a6/wCDib9qr9q39onxJ+w1/wAED/2f4v2jPH+gzXeneNP2qtc0i11z4d6Ettctp994h8CWes3ukeAtN8H6dfqLWz+LHxZ1k+EfEd75ln4b8Ha5Z3eh67q3yT+zT/wS1/4L3f8ABRT4Yw/tOfET/guJ8W/g3rfifXdb0o+B/APjv41aPoenDRrvyJ2XQ/hR4o+EfgHR3e4eRVs/D3hl7NI0XZeyIFVT+2M3/wChrmXT/mOxPy/5e+X4B/qfwj/0S3Dn/hkyz/5lP7c/+Ga/gFz/AMWi8AdeD/wj1jz3/ufh696/kK/4L2fsz/An9vL/AIKKf8E6/wDgkp8CvhZ4B8LfFTxTrF/8eP2m/jl4N8IaAvxD+EP7OWl2esQf8I7a+Jnsrk6emu6VZeNPEy+HdZkXTbrxXb/CiSS0nTX4BP8AL37LX/BG7/gsl+038HG+Mlv/AMF9v2nfAmkx+JfEHhyfT9Y+L37UuotbtoF1b2sl/NfwfHKzgjt7h7hWG+JRAqs0sm0Fh8m/8EpfF37YP7HH/BUb4pfCjxf4g1X9pz9qv/goJ+zsfAnwR/ap+JOu+IfE3jW1vfA/i9IPEd1P4r8b6p4j1q50/QfB3w41+bVLLUdXuhp0fgbwBqMsE2i2EWl3WdbMMyxFN0sRjsbWoyaUqdbFV6lKTi01zRnOUHyvla0dvdfY6MHw7w5l+IhisvyHJcFi6cZezxGDyvA4bEwjUi4S5KtGhCrBTi5QlaSUk3F3V0eF2/8AwUC/a3/4I3ftn/HH9gD483uvftm/Ab4AeKL/AEzRfFgKv8VNH+E0UNnqHh3xfoGvLeata3VnYeGtTsF8SeBPFeqahZ+FddstQ8J2Xi3wzb6NdGv6JP2bf2k/2Zf21/CP/Cwv2cvilpHiK1jigfxJ4X8oWXjHwfeXIyLPxV4PvpbXWNBlMqyww3TQTaLqjwyTaHqOoWgFzJxv/BZr/gkV8Nf+FT/ssWv7MPiDWLj/AIKveFvGU2r/AA78VaT5FxrXx0g8RXLXPj3TPihLqVw0WkeCYdRS5HgnUtce702xaXxJpWuw3mieIPH3iHTv5VdR+FX7NerfGPUvh7+1FofxR/4JW/t1eErltN8a6Xpd1L8KPh/4j1d3KnxNod7qttN4f0DTdd8s6lbTW2teGNC1aOWC90bxL4wjuRqUn5XxVwnl+Ik8bRw9bCSnrVxOAofWIc9/elisvjKE3GSu/rOFlzRk5OvRkkpv954H43zTD045dicZhscqclChgs2xX1Ko6TjFRhgc3qRqUo1IStH6njYqE4ciwteMuamv7Z2+H0o+7q8bY5G6yK/Q8XL4479ue1RN8P70fd1K2OOPmgkXr9C3Pt9K/mJ0f9gH9sV9LtNQ+Gv/AAVa/bEbwvexCbTrvSvGHxD8T6Jd27BAkun6jofxitdJuEIUBJ7VWQoQFO3Ob83/AATw/bQurS5vPGX/AAVO/bTutPtY3nurq48YfEfQdJs4UC75bi41f4vXlhaxqozJJI0aA/O55JP528kytOz4iwqd7cv1DMOe/ZxdJRv5c716n65/bWeNXXCOKa5ebn/trJ/ZWsndT+s8zi07p8i0Vz+l+XwTPZxyXeo6vp1np1rHJcX15K5hjtbSBGlnnkknWOCKOKJWeSWaWOKJA0jttU5/EX9tb/gvH+zz+zq9
58Iv2SdNsP2mPjbPdDRLbUtHuprj4SaFr9zKLO2W/wDEumOt58Q9TF3LAsOi+B5v7Kuy0lnL4w0m8gNmfww+P/wk/Z6s9c074Vf8NZ/tc/t9/HrxTqlvoHhL4HfCP4gj4it4j8R3MnlWGjanrdpo/wARrDzJ7wGCbSPDtxr3iyOXMSaLCS1zF++3/BEj/giN8Nf2f/2pYdf/AOCrPwTt/AHxh+Jfw/huf2UfgpqN9bX/AMMfC7+I7G4sNUvNd8SwavrE+ofHLSrO6itPCFld6/dzeD9de9v7i5bx6/gsaP8AZ8PcC5fXksXjK2LxmHjaVOnLCTwFCq007v2tR4mtTs7pwjRpyt/EmrxX5zxX4k5pg4SwOXUMvwGLneNWrSx9LN8RQTW0XQorBUKqaakp1cTVi3/CptRmfKPxf/YJ+O37O/7bX/BMT9p//gvR4n8K/tHfszftXfEHWfBfjfwh4d8W69a/CD9mjxPr2kWt18MvDXiS+8OXGheFB4djvtW0bxb4ttfDKzeGvEHh7wX47i1LXfGltp82rap/ogeHf2Sv2YfCWhaT4a8JfAf4U+HPDWiWNvp+h6DoHg3RNI0TStNt4wtrZaXpunW1vY2VlEmFgt7WCKGNPuKBjP8AFd/wV1/Zn/aJ/aB/a6/YK/4N/bT4v/a/g38Q/iTrf7UPh74iyafH4k8bfDr4N+GfCnxB0axTxHpCano8V9D4P0zS/iyND028u9L0/UtYTTU06+srK5s9K0Xm/wBmP/g2j+LX7QOrftB6Vcf8Fc/2n/CqfA745eMfg5ayW3hnxLqv/CQ23hS6a2j12aOT49aeuly3irubTo2vY7fIUXko5r9bwVSrlkPZ5bVqYGnGEaajgqksLFQTvGHLRcFyp6qOyeu+p+AZtgsFxBNVM+weFzqr7SdbnzbDUcxmqskozqKWLhWanJJRc01JxSV7Kx/cj/wzX8Af+iReAcdv+KesT2zz8g9+n19aT/hmv4Bdf+FReAf/AAnrHGcD0T1OD+HHc/w4fs2f8G0fxb+Pnin9o3w7c/8ABXT9qDwwnwI+NniX4SWt3B4Z8Taq/iWDw/dXVuuuTwyfHuwGly3Itg7WEcl+kRfaLtwu4+ceBPh//wAF+P8Agk54h/aG+I37HX7Rfi7/AIKF/ssfsqfFfxD4H+LfwQ+KkniPxhrNx4V8P3WoS3vifSPhdrviLxN4l8O6SLLT5LzUdR+CHj5fEWnXQfUtb8Naj4btdTlPd/bGcf8AQ0zLRJ/79idFpb/l76fgeMuEOEHb/jFuHNb2/wCEPLOm/wDzCn963/DNfwB6/wDCovAOPfw9Y/lnZ357HoaP+Ga/gF/0SHwD07+HrHrn/c6dfpg9elfDf/BJn/grX+zx/wAFbPgBL8WPhEk/gv4i+C5tO0T42/A7X9Rtb/xX8LvEuoQTS2LreQQ2ieJfBPiJbS/uPB3jO1sLG31mGyv7G+07RvEGk61omnfqn14wvQd/6YPGfc0f2xm//Q1zL/wuxX/y3yX3D/1O4R/6Jbhz/wAMeWf/ADKeID9mv4A9T8IvAP8A4T1j/wDEEf8A6xSj9mv4A/8ARIfAOD/1L1jnrx/AQe9e3Z6fd59+vbAHQnp39s96AfoMZ4B6euQOB3+h4zzR/bGb/wDQ1zL/AMLsV/8ALfJfcH+p3CP/AES3Dn/hjyz/AOZTxD/hmn4BHn/hUXgLn/qXrP8AouPyor3Dn0A/H/61FH9sZv8A9DXMv/C7Ff8Ay3yX3B/qdwj/ANErw5/4Y8s/+ZfJfcfjB/w8h+L54/4Qv4anB/58/E/tx/yM4yORx7euKP8Ah5D8X/8AoS/hryeR9j8T8H0IPifgnH59+lfnnwDyF/Mdc85HOMZ6Z/PAIMKDjI9xnpnH4noR7Z5GM1/Tv+o3Cf8A0I8J0/5+vXT/AKe6/hq9d2f5jf8AEb/Ff/ot84/8tfL/AKhvLX/h7/oYf+CkPxfwc+DPhrgnP/Hn4o5xjp/xU3PT
nrX883/Bff8A4KrftWfGD4dfCH/gmZ8AtM0DRvix+3P4t0vwVrw8CnW9P8S6h8PtT17TvDFl4J/tC71u9/svR/ib4p1CLStevo40Sfwp4d8U6NqDjStVvkf9DuPYexJHrj/Aj65zkY/GH9n/AETT/id/wdR/s+ab4si/tDTvhT+z94y13wxZTfvbeLUtI/Z9+LHiXSZ2R+I307xF4pudbtmjBZdQs7WUHI3L8Tx/w/w9kvDtbE4HKMLQxVbE4bC0q8VUcqPPKVWpOPNUlHmlToTpptO3tHJWkkz9r8AeP/EHjTxEwuXZ5xdmuNyvBZXmOaYnA1Xh1SxfsYUsLQpVHChCfJDEYyjiGoyXM6PLK8JST/pP/wCCCH7P3wd/Yp/4JfeF4fCnhvR7XXdU8ReMNT+JfjOy060tvE/xP8Y2WuXWjWd/reoeWLy8S3iRNL8PWNzPLbeH9GVba3VFW7mnj/4Jp/F3xn4S/ZU8O6FodzY21jB4u8aXA8ywiuZnkudXaWTzJJy4wGOFEaRkADJLZNdF+w/fyQ/8E9vhBpiMVjvviF8ULqUA/fGn+I9TjRW9V3X+8g8b1Q4yox4f/wAE+f8Ak23RP+xo8W9P+wmf8+3tX5PleDoydKdWEairRxTUZpSjy0alCEPdd1dOVS710a7H9lZxjq8I16dGpKk6E8HHmhJxm3Xp16k7yTTs1Gnpto31Z9Bf8EsPi3omg/srzeCfFOl3D6Pqfjn4gCbU7FlnkSPVp7eK4juLFwjGJVZiJYJZJQDhbaRgCf5e/wBvW28Z/Af9nL9jn/gpl8KbWXUfHf8AwT5/bevfE2oWwkmgt9W+FfxB8R+GrDxJo2qSwgyLpWueItE8L+FryLAEeneNNcKsjTSCT9//APgn1/yb1B/2O/i//wBK7f8AnXgPwz+Auh/tR/8ABPT4+/s9eIBAlh8XYPjD4KhvLhPMj0nWdUtIl8Oa+q4b9/4e8RR6VrtqdrFbnTomCnbitY4ClOjSjBNSxODqykruUfbQVBwmk2+VuTXNZpNW23Mp5lWhWryqSThhMfhoJxilN0Kn1hVIScbKajFPkum027vofp/+zPB4S+Enwj17/gpx+1f4y0rxJ8Sfjb4L0TxzoWqWUlvqOn+Efhx4w0q01fwF8O/hnaC5lhn1XxFo93pkQFlcsi28kOnm8+yW+va1qv42/wDBVHxp+y7Y/s5+I/29v+CrPwF8N/Hbxn8W9Ouvhd/wT4/YY1G+1TRvFeoDWHjm0/Uode8NGz8deHiIb6DxB4s8V6PJa3Gn2d9btFbzeJPEvgLw1ZfD3/BMT9vb4ZeE/wDgmp4d+N3/AAUy+JthL8L/APglp4m1X9lj4b/sz6fcW9346+M/xp8Nx3Ot/DvR5PC93dhPEdzpHhC90rwbpElw0Xh+30zwVql/rk2k+GtC8XTa167/AME+/hN8XP8AgqB+01rn/BYX/goFZxv4h0jXLjwn+xp+zHcC6ufBv7OngvSVtdV0bxDJpuoxRLdeKY4tWhvtIvLuyt72bxFPqfj3U7W21ibwtZeFfEw2Hniq0aMGlKV3OT+GnTVuZpaXaWlurajpds9/GYungsPPEVFeMbKEVvUnL4dVdJPdt7K73SR+Uf7CPi7Sf2M/22rj4GR/DTxZ8CP2eP22vBnhPxp8Jfhz428R3fi6H4SftEeGPCWiN8Tfg3Z+NdUnlvdfjsNc1PWPDtjqetGLxTqFh/wqu01u2k1fUJbif6A/bt0+8/br/bi/Ze/4Jq+EbDW/GHgrQLy6/aN/aw8P+Ftbh8OXV14G8J6W2o+HvAt14rk3QeE77xNpz3ekW2qX9vd2ula18Rfh5r32S5ltIImxP+Cvfif4B+HP+CfE+oeNdd8QeGv2ldA/bU8T+NP2StZ8K6HeXurWvjDw4vgJ/Gy32vL9lsNA8Njw7dW9/dm41OO+l8SaZ4T1DTdL1h9Hlhi9T/4IvfEL4Vfs06T8eP2jv2ufFut+Mv29f2svGMGu
3/w3+GXw/wDGnxo+Kth8JYNP07VfCVjpfgb4Q+GPF1/4c0vxHr2oao0u9NO8MxWHhvwvoxubdvC8iQfOrgvB1eMcLnVStSVBYRVq9JtKX1yMVSpVHpypujstJqpGjWSk5St9ZLxAx9DgPG8O06FWWIeO9hh66TlFYCc1XrUopPnaWI3fwOlOvQlypQb/AEj/AOCN3hX/AIJZftt/s9/EX4F/s4fsq+Dv+Ccn/BQr9nLxPfTeNvB0Guar4v8Ajv4N8ReFdZl07QPHWnfGDxb5fxM+Jnw4m1FbbS/FemXV9EPBfieRoRYac914K8S65+y/h690n9trwX4r/Yw/a/01Ph5+118IV/tHRfE2npDa3mq3OnwqNE+MXw0u0NpHe2t/CLafxRoli9vZ39nOL2zSwhkjj8Pfy1/8MVfFf9pTR/E//BRn9hb/AIWF+yx/wUM+AP7RPxAvPhzJ42tdM8HXvxk8HabpXhy9Pw7+IWgjWNU02xbVrfU9U0XTF8YLb2+p2l5qngn4haWnhq/0/UvDv2n4t/4LM/AD9tX/AIJ5/tBftGePrmX9jn/gq3/wTk+H+t614h+HQZ9H8UD4hafqdr4L0lPBVtq88Wp+J/hd40+JGt6PoOpaFfy3+ufC/wARa0um6+uqaNqNrqnjz6Gth6mH9nKSfs6qc6VRK90nZp7WktFKL6NNXVr/ACtDE0sS6kYte1ouMa1PZpuKkmlfWLT9yV904t3Tt5f/AMEpfE/xi/aK/wCCtv8AwUX/AG1/if4x0/4hax+zT4f0r/gnt8KviLawteaZqEfgrVf7M8beJ/Cd3cRRBr6+h8BLrM+tS26XN9B8V9TvRDFPqLyJ+237A/xh1nwPJ+19Y2FhbahqWs/tS/EDU5tU1O4nnCySyrHI0lvGY5bieSRHmeaS7AZny8bnJP47/wDBuD8Nrv4dfsDSz6yJ38UfEb4iah8VPFF1dl3vL3UfG2g6HfadcXskuZnuz4bi0KO6aYmQ3KTM+GYiv0o/Y8/5Dv7Uvb/jJHx5/wClbf5/zmvdwmDw86OAU6cZe29vUqXVnOUU3FSta8YbKO2+l2z5vG43FU8Rmbp1pQ9jHD0qVmmoRlKmpuKldKU7tylZO9rSVlb1P9iP4w+MPDPxD/bKv7B9NlOv/tPeNda1S2urIPbz3l1qF+8xQxyxTwRkuQqRzgKMdSM19G/8EztbGv8AxL/b81WWOGC51T9pvVdWntI5N4txfy69IAu4BzEZPNjjZgN3lsMkg18Pfsj/API6ftZc4/4yH8Xfj/pl6P5kV9Lf8Etblk+OX7a9nuwlx8V9auWXsWtNauIlJ7fKt5IP+Bcda58bhaKwVKpTpxhUbq80oqzlGEpOzs0nZRSTabVkkdWAxlZ4+vSqVJzpqOGUIyd1CVSFOLavqryldrZu7eup/IX+018QvGn/AARX/wCC5PiT9uD4BaNpnhf9m342ftE/E/4JfG/wPa2sln8PVj8Qa7DqHinT73Q9Hn02OytrdL3TPiv4PtbWa12eJ/B2uQWrxaOLmwf+wdf+CkXxeZVZfBvwzZXUMCtp4nKspAIIx4oIIKnIIyMdDzz/ADvf8F7vgzYfEz/gn1/wVY8czW0cupfs+/8ABQT4KfFPS7gKPOhXXfE/i34L6jEkn3hBNZfFd5p492x3tLd2DPFHj6D/AGNvH1z8Uv2S/wBm34hX8zT6l4s+CHwx1jV5nZmeTWp/CGkJrTM5OSTqsd3liTzgknqft/DTK8mzepmuDzTLsPjKlGnhcVhqlTn5owm5U68PdnG8b+xlFa2bn0kfz39JfibjLg/DcKZvwvxDj8ow+Mq5hl2Y0MM6LpVa0IYfFYGrarSqNVPZrGwqNP3oxpq3uu/7Qf8ADyD4wD/mSvhr05/0LxPwPf8A4qf6/rSH/gpD8X/+hL+GnT/nz8UdPp/wk/OOT3xz68/nnx7cDjBJ9ufYcYznIzgc4pTjrxwT69+uecAYPPXk9M8H9Y/1H4T/
AOhJhP8Aytv/AODfJa+um9/5M/4jf4r/APRb5v8A+Wnl/wBQ3l/V2foT/wAPI/jB28F/DX/wB8Uf/NRRX54ZHqPzFFH+pHCn/Qjwf/lX/wCWGP8AxHHxZ/6LjOPuwn/zL5fn3Z/Rt/wzB+z5j/kkngvqP+Ycv8y/+ecUH9mD9nz/AKJJ4L7f8w0fX++T9fbn1x7wT146kfxdT7ehHH+RQccjC9j1A/MY7fyPviv5k/tvOf8Aob5n/wCF+K/+Wn+nf+pPBn/RI8Mf+GDKv/mQ8H/4Zg/Z7Gc/CTwWOR/zDR7Z/j6fX9BX8eFx4P8ADHgX/g8i0Hw94Q0PT/D2iQfsla9cQ6ZpsIgtI57r9lDxZLcSrHk4eWQl3Ofmav7hzjJwB1GefoM+oI59OeRkmv4nPGf/ACud6Nx/zaJq/HHH/GJniv8ADj+dY4jMcwxdNUsVj8biaSkpqniMVXrU1NJpSUKlSUVJJtKVrpNq+rO7L+HOHsprvE5XkOTZbiXTlSeIy/K8Dg67pTcZTpurh6FOo6cpQhKUHLlk4RbTcVb9e/2JkP8AwwV8E37L42+MS+2X8UFh+OE/n6CvHv8Agnz/AMm26Jxj/ip/FnA5/wCYma9s/Yijz/wT7+DkuPufEH4qR/8Af3xFqTenfyc9fWvE/wDgnz/ybbon/Y0eLP8A05mvfyt3p4NdqePXX/oIoP8AX+tTxM4Xv5h51stf/lrXXl2/4Iz/AIJ9f8m9Qf8AY7+L/wD0rt/51L+wJ/yQq7/7KN42/wDSqz/z+FRf8E+v+TeoP+x38X/+ldvUv7Av/JCrv/so3jXv/wBPVn9evOfy4rpw/wDzL93/ALLW/wDdfb/g/kcmL2zX/sNwvbtivP8Aq3fQ/Ef9hL/gjB8JfHn7Y3xh/bl+M0+meN/Adp8WdQ8QfCD4IzwzXOgW/wAUY47C/wDFXxA8eWdzEum6qtlr0rXnhXQo0ubSe9mbUdcZ002wsZv06/Z8/Zn+Dn7R/wAGtVs/izoHiDVG8JfG74l3fhnUvC3xF+JHwy17RLnVrPwrFfzWHiL4Y+LvB+uxtcJYWe5X1B41MCFEBL7vfv2Cv+SO+Jf+yteO/wD0LSvfr/LnpXqf/BNb4OeK/G/wc8f3+mz6NaWdp8dfiHpss19fyENdWtr4dacQiwtr5ZkUSoVmVvKkDZikZPmrlpRwuGhg51VCFOrTrzquaTU5zdBq973tpyrWyWi3OuvLGYuePp0XVqVKNTCQpRp3Tp04qsny8rSje/vS3d3d20P5bP2vvDfwF/ZG/wCCfHx68dfELQf2j/2k/D/jj40ePPgt4K+CPxE+Knjr4hfAPw7431nw/plx4Y+K/jfV/E8useKfBWv+DDBcXPhvxP4f8aaL4r8Qautn4atr6O31DUdU079C/wDghH+yh+034e/4Jx+LPht4V/aA1DSPin4o+IX9u+ENas/CXhX4s6B8HdCj0X4fahe6R4V0vWJbOLxcuq6TdXFlcSX2uXvhnSby8hvvC2leRaXlz4g9X+Nnw38E+JP+CSP/AAUn074jXGk3GgxR/tY3wt7pd7ad4j+G3w+8O+MfBviGNZQFa4s/GthoV7okYCz3GqadHDCrSEIPev8Ag1Ti13xJ+wB8N/EeuSzmbQLzxHp4eZnLy2MOneHvD3hqJ2bkwf8ACO2EEsTHIC28AGVORx+0jQlRrxcXFYGs4pc0HzqfJKLlGUZ2lJpQacVF3cFe7ffKm8RHEYaUZKTzLDRm7xn+7lTjUjJQmpU1yRUpTTi3JJKV1aK8U/Yb/ZC+MWoN8QvGXjz9tf8Aat1KHwR+014m/tPwf4Rtfgz8Mvh14r1rw8fC2pX0PjDTvDnwhfxJdWmtEx2Ot6DbeM7K0k0zFrHHCZZJpfiz47f8EZfgn/wUmsfir47/ALTHwt+O/g/9prxDpknxD0+1kntPGXw6g1PRLjXPBnjDTYGT7ZcQ6dLqE3hHxFEDqWiao0NvctfaLJJYxf0z/wDB
NHx74O8I+AP2qY/EerWtvLP+2X8ZZ4rAI93eXMD6R4JjR0tII5ZGhmkjkiWWRUt2ZHVpAEfHyr+yFqOmX8P7S0ljpwhe5/as+K2ow3hkeNjpl4mimx082KZtofsrJNKZEZmY3HlcJCu7TCNYp0KFbCTdK1X95KUpRqNxTvFzaacHpenKTV1e2pnj08EsVicPjYe2f1e9KEYRnSXOlaag2pKom9KkIppPV6I4z9hXRtL8OWX7QPh/Q7GDTNF0L48eK9G0jTbVBFa2Gl6ZBaWWn2VvGOI4LS0gighReFjjVR0rZ/Y8/wCQ7+1L/wBnI+Pf/Stv8/5zUP7F/wDx9/tLcf8ANxfjn/0OHn9am/Y8/wCQ7+1L/wBnI+PP/Stq9Ghp/Z9lb/ee3Z/h+PY8rFavMm76xwV79daPmtdXu1vvuH7I3/I6ftZf9nD+Lf8A0sva+gf+CX8mz9of9sIdBJ8U/FSHgHIGqmX0PeMdO469q+fv2Rv+R0/ay/7OH8W/+ll7Xu3/AATHfb+0b+1iOnmfF/xanUc/v75/f+5254znFcuKV8DTX93GP7oVn+h24N2zGv5yy5ffPDr9fubPyw/4KzxpJ/wTC/4ODVkRXC/HP4MyANyBJF+0r4DkjYZ5yjqrqeoZQa/U/wD4Ibfs8/BXxH/wSN/YC13xD8OPDGta1q37PPhW+1DVNQsBLeXc89xqLl5pA67zGpWJDgYiRASSCT+Wf/BWP/lGH/wcHdP+S4fBzv8A9XJeBenc/wCAzX7Y/wDBB7/lDx/wTx6f8m2+Du+D/rL/APLjrjnHPYA/P0cXisJNzwmJxGFnKnCMp4etUoylHli+WUqcotxuk7NtXSdro+gx+U5Vm9GlRzbLMvzSjSmqtKlmOCw2NpU6vK4e0hTxNOrCFTklKPPFKXLJxvZtH3p/wy/+z5x/xaPwZ0yf+JYP/jnP+eRxk/4Zf/Z8GD/wqPwXj200fp8/PHP0B617vxxwp7DkflkjPcfr7ZXjjgfUH8OvX0yev44z0/21nP8A0Nsz/wDC/Ff/AC08n/Ungz/okeGP/DBlX/zIeD/8Mu/s9nr8IvBee/8AxKx/8cor3kZwMAY7cn/Cij+2s5/6G+Z/+HDF/wDy7y/q7D/Ungz/AKJHhj/wwZV/8yCdv+Bfybj8qCBzx/Ev/stFFeYfTh6/7y/+y1/E1414/wCDzrR8cf8AGImr9OP+bTPFdFFAH7J/sQAf8O7fhIcc/wDCzPiMM98f254g4rwf/gnx/wAm26J/2NHi3/05miivqsp+HDf4cf8A+ncKfG518WN/6+Zb/wCmMSN/4J9/8m9w/wDY8eMP/Su3qT9gT/khV5/2Ubxr/wClVpRRXZhv+Zf/ANgtb/3XOPGf8zf/ALDsN/7th+wV/wAkc8S/9la8d/8AoWl19Vf8Ebry7Pwy+Kdgbq5NjH8YfG9zHZmeU2iXElv4eSSdLct5KzSIiK8qoHdURWYhQAUV5WY/7lg/+vVX/wBKonr5X/yMsw/6+0f/AEmsfz4f8FEru6s/+CNf/BS+W0ubi1lb4yX9q0lvNJBI1rffGD4L2d7bM8TKxgvLO4ntbqEkx3FtPNBMrxSOjfYv/BC9msf+CeHwnt7JjZ28+j+CzNBak28Mxk+EHwwmkMsUWxJC8skkrl1JaSR3bLMxJRWuG/3jA/8AYvf/AKkMxxn+6Zj/ANjOX/qPRPpr9hn/AJFj44f9nGfET/03+F6d+xL/AMeH7Qf/AGcZ4/8A/RWlUUV6FD/mB/w4n9DzcXvmnrg/ziN/Yu/4+/2l/wDs4zx1/wCjIam/Y7/5Dv7U3/ZyXj3/ANLHoop0t8D64n8pBit8z/wYL86R8MeOfFvivwhp37QN94S8TeIfC97dfteeLbS5vPDutalol1cWv2DxJL9mnuNNubaWW382KOXyZHaPzI0fbuRSPnzwz8Xviz4K1LVdY8G/FD4ieEtX128m1HXNV8M+NfEug6lrOoXG/wC0X2q32lan
aXWo3k/mP511eSzTy733u245KK8LFfwv/B//ALcfSYT436YT/wBwnLeLPE3iPx74Y+IXgjx14g1vxp4L+Ld9Z6n8VvCPizVb/wAR+GPibqWnalBrOn6h8QtA1i4vNK8aX1jq9ra6rZ3fiS01K4tdStoL6CRLqGOVe28B/HD41fCzwd4d+Hfwx+L/AMUfhx8P/CGmw6N4T8DeA/iB4s8IeDvC+j2xY2+k+HfDPh/VtO0XRdNgLsYbHTbG2tYizbIl3HJRXjPf5R/JHtnW/wDDV/7Uv/Rynx+/8PH8RP8A5o6P+Gr/ANqX/o5T4/f+Hj+In/zR0UUgD/hq/wDal/6OU+P3/h4/iJ/80dFFFAH/2Q==",
"company_abbr": "FRP",
"company_name": "Frappe",
"company_tagline": "Open Source ERP",
"country": "India",
"currency": "INR",
"customer_1": "RIGPL",
"customer_2": "Mahesh Engg",
"customer_contact_1": "Aditya Duggal",
"customer_contact_2": "Mahesh Malani",
"first_name": "Rushabh",
"fy_start": "1st Apr",
"item_1": "Enterprise Plan",
"item_2": "Small Business",
"item_3": "Solo",
"item_4": "Manual",
"item_buy_1": "Server Hosting",
"item_buy_2": "Adwords",
"item_buy_group_1": "Services",
"item_buy_group_2": "Services",
"item_buy_group_3": "Raw Material",
"item_buy_group_4": "Raw Material",
"item_buy_group_5": "Raw Material",
"item_buy_uom_1": "Unit",
"item_buy_uom_2": "Unit",
"item_buy_uom_3": "Unit",
"item_buy_uom_4": "Unit",
"item_buy_uom_5": "Unit",
"item_group_1": "Services",
"item_group_2": "Services",
"item_group_3": "Services",
"item_group_4": "Products",
"item_group_5": "Products",
"item_img_1": "logo-2013-color-small.png,data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAEJGlDQ1BJQ0MgUHJvZmlsZQAAOBGFVd9v21QUPolvUqQWPyBYR4eKxa9VU1u5GxqtxgZJk6XtShal6dgqJOQ6N4mpGwfb6baqT3uBNwb8AUDZAw9IPCENBmJ72fbAtElThyqqSUh76MQPISbtBVXhu3ZiJ1PEXPX6yznfOec7517bRD1fabWaGVWIlquunc8klZOnFpSeTYrSs9RLA9Sr6U4tkcvNEi7BFffO6+EdigjL7ZHu/k72I796i9zRiSJPwG4VHX0Z+AxRzNRrtksUvwf7+Gm3BtzzHPDTNgQCqwKXfZwSeNHHJz1OIT8JjtAq6xWtCLwGPLzYZi+3YV8DGMiT4VVuG7oiZpGzrZJhcs/hL49xtzH/Dy6bdfTsXYNY+5yluWO4D4neK/ZUvok/17X0HPBLsF+vuUlhfwX4j/rSfAJ4H1H0qZJ9dN7nR19frRTeBt4Fe9FwpwtN+2p1MXscGLHR9SXrmMgjONd1ZxKzpBeA71b4tNhj6JGoyFNp4GHgwUp9qplfmnFW5oTdy7NamcwCI49kv6fN5IAHgD+0rbyoBc3SOjczohbyS1drbq6pQdqumllRC/0ymTtej8gpbbuVwpQfyw66dqEZyxZKxtHpJn+tZnpnEdrYBbueF9qQn93S7HQGGHnYP7w6L+YGHNtd1FJitqPAR+hERCNOFi1i1alKO6RQnjKUxL1GNjwlMsiEhcPLYTEiT9ISbN15OY/jx4SMshe9LaJRpTvHr3C/ybFYP1PZAfwfYrPsMBtnE6SwN9ib7AhLwTrBDgUKcm06FSrTfSj187xPdVQWOk5Q8vxAfSiIUc7Z7xr6zY/+hpqwSyv0I0/QMTRb7RMgBxNodTfSPqdraz/sDjzKBrv4zu2+a2t0/HHzjd2Lbcc2sG7GtsL42K+xLfxtUgI7YHqKlqHK8HbCCXgjHT1cAdMlDetv4FnQ2lLasaOl6vmB0CMmwT/IPszSueHQqv6i/qluqF+oF9TfO2qEGTumJH0qfSv9KH0nfS/9TIp0Wboi/SRdlb6RLgU5u++9nyXYe69fYRPdil1o1WufNSdTTsp75BfllPy8/LI8G7AUuV8ek6fkvfDsCfbNDP0dvRh0CrNqTbV7LfEEGDQPJQadBtfGVMWEq3QWWdufk6ZSNsjG2PQjp3ZcnOWWing6noonSInvi0/Ex+IzAreevPhe+CawpgP1/pMTMDo64G0sTCXIM+KdOnFWRfQKdJvQzV1+Bt8OokmrdtY2yhVX2a+qrykJfMq4Ml3VR4cVzTQVz+UoNne4vcKLoyS+gyKO6EHe+75Fdt0Mbe5bRIf/wjvrVmhbqBN97RD1vxrahvBOfOYzoosH9bq94uejSOQGkVM6sN/7HelL4t10t9F4gPdVzydEOx83Gv+uNxo7XyL/FtFl8z9ZAHF4bBsrEwAAAAlwSFlzAAAZxQAAGcUB/Hz7SgAAJcZJREFUeAHtXQmsHVd5njMzd3m7n5c4jQOJTUiIbRwggCJKwG4hoJZNVNdFqKUKSEArVKVqGrWU8PwUQCgEFQmQSKUSKUiI+qGItYIINRa0AaUssbEdEnAWhSTEjp+f33qXmTn9vjNzX952Z+4699z7zrHn3XtnOef/v///zn9m5ixCSmmZFI+AsCwBlBRQr/7ZzVf6QfBeaYs3YOdu7B7SFUIBwSHfghT2EyKQDzq2fd+vbrjnSe5dqRN/m7QxAsIQZGNgqnuFJcCDkAL7H7z5fbYV3Aam7IKHlUGZIs7zq+dq+ulA1jxkzYIUzwSWfefJN9zzDcq6UjdNZe+6WHbXJdBdgMkjqh5+5U//5oO2kJ8HWbZix1nUzLPwME938UMZxSxlpuwg+Oepi5I70k17HboooIkgMeAfOnbIfeDgA951P
/vga6T0vy6lyAhLLuKSLByvZ4KvampJSGxZZXwMIiZWhHDef/yGr/6yqmMMDJv6kLuptY9RXjU/DkoVIUCOm8GGLXC0c2yqqPsRdUcSk4FGh6J7JAmGZEGOReiyAy3DmyHiL1kBmKZWbWOZJlYNbApTBYXNK/73Q5fBwV4rhFhC7WszctS4RP/djHrQIdTFuv410I1CV3XVX4H0JTQEqYV5ITyQsSp0onF4FqIJKuHepQfEp04SzWroIq2tlVA3MCTU1fxdj4AhyHpMwj1T4QdixhCcCvceVgCnYju+txN0ULoIyw11gzqRrr2tWGekNwRJwFUKp/dJsZGOeOLQt7ptpG+T+wxBkoDz9H+Sm6RCzeN9rFpNnRs8YAiSCJh50JcIUR+fYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AI0jqGJoc+RsAQpI+Na1RrHQFDkNYxNDn0MQKGIH1sXKNa6wgYgrSOocmhjxEwBOlj4xrVWkfAEKR1DE0OfYyAIUgfG9eo1joChiCtY2hy6GMEDEH62LhGtdYRMARpHUOTQx8jYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AIUgNDM5daDWA22e7emdMGE8qmOe3nkR0H1YRxLhDyghVegb1cTScuJRyOu7SlY43OcCdFoC45t+McwOWEiykllIQJtLsFU0NK9kAEwQSZU8KxjmDaT2VCAtv57dTBHRsbcOO9q0Cnq6lt1d7O/6BocdtaCYRjK212QFcuEpTGpmx45IgQU4cxZeWk9v6ncQSBi01ZtlWQ/vLkyhNrTdy530eto2rlqIeFW9xvBRJBhLPaBvCi+mZ45zTqmCianFZe2AFRl/PmIlj1xFeeJTjHMFdCsKWdVStkWVVdOyDiuiyjyLEMCYmC6eUDknPdyRrs0HQBHeVXgEfK5++/dCjjVG6Eax6AWbcHgcyg2qGbdjT50rHHrPPF+zOv2/PJ7O6b8pi8mqsHwLfgXDVsCUcNLBH4ll32LHepIuwyAMZyA9SkQ+LaQKYSZOVSZSAoB1lLBlgIS9XTGxbIMIHWKpeVs+WSf78seY/j9LwSfcMr2rcTELAlVwF8LwDME0Oy+JPjf/3DBcpDSHUkiYYR5EV3euH+re/MWPJWeN0BGpEgRljW9NF2mZO1s4fytgQLtmOVM1w1BxGBItRO0UF4IIlUKVm5i4tW9gJk5y4uO9C+pHxcCu/C0rZgtjwmvQBQMTI0ksS7VURJ0quRLJPOjUCwA6u4ZOdPvPxr77wLYH2Xl5E9upFEswhCcjBJeeFHWz+G9synAJoDv5rFPp8VcXi8838DyxGDwfng55n9A/+Ye/lWrltGgvB+PdELwzAD0aVdtjKzc9bg82AII1BbSKICQIC8zy3sDObLoxaiSFgHw7/qT0L6chrOuQTuotnYVvpuIMUq1ICN5eCkUUDqQ/ZPPPpX3/6SiiTYCZka0WODstq3S68IEt1zMHLAH0EOqwK4ZoBmBm6pZAV0q5BuHxSrc1IWCtvrLFf5H32o3sJJCFbMWVEZHbKK3pw1cJZNH17fivXV9WCaP43IAXJgdQbcK5EYjB71RxCqAn1cXJKJuL8agLb/irRGYcxaCSzleXxiGW3xKUSSpxhJ1D2JRisHd7gl3wjKqMVwQ857DjSib2XkwNULAC8HEFX9FtUrBLjjGxnBQsAIsiKhbcUT16SouYOL/Zwsj2WlN4hqvq4AtCan5Z8kGO5oAtxvDPrzpbGQHDxcPzGqmYWMV8Sigox56WyUVoms6pocWL2Ab44Q9q3Xfe1tQ7JwFC0FfZ5u6UOQKQWYxRtyAIh7DjarEDmqzsmqM80tKo62bClBEVtYds6qDKt8oA/VaCbhQuVb/nxlGM/29LFdM8rgmoiYaB1Ys6DnKxdE/kaV1dSpZiFqUpLal+kBMu+8T4fG59MqMAEPjeACY
f0dEqO2DnofQc3MsOFIPwcnsKMo2LzMuPeQZT8XVidhc6X5zLp/JaMiLO+D+QN4KwPbI53eh4CmWqNdF1APgrBePBISBLXkdoWKqiu7jk/bBGA7H2DjZli5RHP58tqAD9PwvKC/Umjtqu2PHGm8SdshPPQBOnpywfcc1FXdA7B26ZdEXVrUpno5b7D7BRaQHbpEtaEMba/TUyx9CBJZHAL1WeyIFIvuH9rh2OoGux0ZaZbHi0zRRzDtCKLbredKtq78ro8JjSSdREA/gnRS2wbzJiHwogBvtMImcf+0a0IgqoTvN70aNHPs6YYgG8Gz/KjJtkascjCC3x68qK8cCS/10f7HA7a+0moja7a0zxCkJnzsAzGI3pEX/d1B2V/A24zEvlg189LtAB+hSg4GUW9TqpFENyl1kMcQZCMrwH94T122cmIsOBe8yb9Q5FtL1rVNA6bTkyfIgodHRQRGdocxIWQjH4j2NW3vmDz745BqZjGKDFt/4p1ZenuwUP6tyNjoGyHZB4ZeVc9GMAiyevLEqpr5RnmHD+zUTh5I3HgnFHaXxKnNJNWsQiC0ZFkEwRJf8ZsUj4BenRXjZU33KKMImuhlkROjwWzw98XfzJZy14790B3KXGH5GIBR/5AUjttjewaOGaDjLL8255l4UajeMJNLzSVwW1ag1iw6vCg5ms+qOQF67SpDkDiLKZL4VlEMi8uD57zbS+WZA8FVQz90x3PPWA5GQ9X1cgM9scAIabuuEGMgC0bPNUcQsIqvUxmQ3IhhzDlOgxePqeaUXMT1C2hY+bgMd+kmJSFgCJKEUBRJSJJtcsb/YOl/Zt9Weal7xh3PnBd5p5TQTkFbP7CtIF8U7rPfzL70nictZ3GrsDIgV2AjniQVr45zjCD7cdlWRXpyUPrBzYhFl+FYEUdImJqJHRxRiA9iVEAML2zv1cuqmtlumgOGIPWYGh7G5laJI1PRj5LR5MrKkxXWwAgHSU7OgYnsu//kx94s76inuKRzXn7vuw6Bt9tQ+EUQEDaMiQUgiDqKxhmjRtypSeVuxuOGIPVaXZGErs5qexgV90jYwIqjR0ge1vC82n3uRy8b2/OWMxenpvZnDxcKXgHduuuaf6uwT+6bOuWeLBwtv2zq8BgGT+D9JQIDBlGwEyQfStVSgwWrY/hb86RaF5v9eFFsUv0IhK4Gd2QTngl/o33rMqE3quaNqsD5S44uzrLnOyZpOYXu3SeD5dlauDMhCeuwmmUlM1cKODUDTo+22uRgljzJpOYRMARpBjvEjnpSfWfVk5M5p1sIxN7gdUsoU65BQBcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sE+oMgpsORls7VD0Jt0BcLHY24DEGa6bQljqFn38EJjHbgNAIoHV38wq5M+JvUp6nOURVparTpy1KjxJTlkmovWDfsmUybK787PGU5B08/IA7vTbev5VRBDWZbJfBqgkyiGzWcFL1MVc/RNK18sFqYK4oY2ENW8K9aGGaZLNVz1nySQNAq5BHVS2LUmuvNz3YhwFqNI/ZhD8wJYWG1NZhQ/Y75w+7OAQYTY9DMfJHnHVX+dzDmks4dOjR5zH1g4qBXLSEiCKLGJHQjOZCm79x2uWuLXRgjNAxOi+Wzq1e1+RMzumPBVQxJKgVFcUBeZeWwwqS0BtBb3F1F5zXlckAGwYUlfExF4IMlHBKEGTtwoiHJGrQ6/1Nwmj1FjEVY5BLb8nY6IhhEVIhpkCB6wGwYjYyBl/62PddNPnlD4MznpY2FpwIuLtbZhPXLMDuYWKh4mWd/M3HNsySHmITAE8q3pDs5KeyJCVABO87fuf31GIDzEWHL14MU2/BdTSTNSQc66XBqng16N4cBzYthmcHCTL61XQ0JUsFhPUgQSQUO/MGFaJhhzTssLgPLqKgTP0hifXZmT
4sIsEkl7Tkp/C22WLhpwF7anRf+CEYbc+qU2rUVxhKjniNHApBp4CbfnX8N4w9JRep0OoEcKER6bqYyvf/Tv/5FINx75MS1v2S5FNxV5MCPC3du/ahti0+i9h2Hyy1hF+IjgkdVxuonr2xzUvcQYf6sT9Q3/mWEqFUUhK8e41kZUDyLWJJHJTYDyNVEiNUTauVh9rcHgZAc81i2dLfrTL99VJR2ZjF5C0zI+SlohdqWUHaM7AyycKYVNljwkVJS/OCIaPlHKLFgS/+t+z5z8s5TH9//VQqvYt/M58Y/AG+8C/V3FrKehXxYmhcTBKjmi9KSmnZuC5FUiAIZ1WiNmk9hnIPkhHjdhh0kEjYe4nWMPFuwhw1h7jOpwwgocoglTAmx03HOv2NMlC7NWk4JjWa4D5dPXG+11ftUlIjMpypHdREntFP3oDi5s59hOViaWizC986Bmhlw4I79nzn1PkLnXvzClj1Y8v52eFkZB+dRheeqNTc+000oD86uSuWnKj9OhhXHCDF+cvaOLGd7A0Uupiv85ixN1U1oaDhzbx4S5e0Zyy2iuduA50SGU9ZTtleVNkxJ46ZWxylPQmkuOLoIMVDBBrdd+9mHf2YHFZtMeQm2uZXk6DVzKzhJKtY4bGpxBVcgXNW81/TpBXkZPSx7EcFij2sXr8SjlUobpjKt3nikRg66SbSxfkYrxJJz2LHL8Z332mivvwl+VQJjMUNGepTtiAOADRGsnGc6u6yPYklHStzkmRLYsrRLV2REkANb0BoKq6QexoXtPDw24tTMlngDv+zBHj5/jnkW10P6giFR1AgfyPeQ6L0pKh6r+CPAuj/cJ6piuZYkXuTI3Ywaw/AoNRVlbxqoptQmbtSEpl0HGK/5kDaaCbXaOmpX9l3Lh2FQYr12MdTpVxxdU9EUbBBoHQH0fTHVbOswmhz6F4F+aTj2r4U2k2bouqGbuloRhM/Y0GlEO5B0M1rfyoOZ8HXTTSuCEBw8cGY3ETxpi54n6IaYkaf9COAFHDJFxeiWdaseV3d3b7/qDeXIt3xyTpTRycVHnypH9c9tkcIqHJmY1JAdUj0ZzSo8TsW9sFMRgbuER2IkizZJK4KAEgJvZDzrAnrl7pSjeO/EFZXUi426UVtxIq7lq092WekKRVT54WsZNq5XSNYO+7M6UXEWX7rUdmdH3FYDPaIHOGKLIH/Rkhm8nEO3d42SXgQhMHxFc1YsyJyVsbbIAXRdYydG1QdA4ZZgkogMPIuvddkzlIB3hSAolx7EBXQ8vJv1to227sjuSE5WiiX0AsToS/S2Vm3RbumH2h62UZ1YFcK8yVacgeZJic0qWgkdS/EWftbyBy/oRg5Kpx1BODaEkcP+vZhBQ8uXY3IQkSXsBkOiUOqYhOMqauBvDiejZ68yQ9JlMTm2dIiM8OAzY3CkkRcyUmxvKTtkNldCf1NrBJ44RkWRP23YHf1IBwwewhCcHASArgHeqMPrY6RZ5g/JwZtyOTgt/cHzYRTUq3lFU2lHEArFKMK6XzwtZsUMBkKNWXn8czHOkERJThLnZqw/oMvi4zBB1xyITsPIQXKA94+UF5zwKU0hzoVqqIdVpngklykGRSv3CCMH/rFTnYsvMS5ZI7/27EYMqWDMUP5KBIJdGF9Hq9XMGWIi4oBHqot5pmQF2Xkhc4tkSrjVvLRrB8T057Y+27XS6ygYRFHGx2hDNWw5jiA8EUjTCbfg7C+M/8P0vz75rfEtw7ab+hj7qmrb0axi5PBBjp03PU9naMmZUWWLA/feNFiq5G02t1REqRaW8mfFu9Y+8+HPXtz/2V/cbvnZv4NuGGLA7uq1SRKKyMe5dlhZaHbPsRZCPSPICilFJqwfMWKw6vwrjq7+GpmFwLOOgjNa1pXvuTCz+qz0f7XarFopMZ5akGALK/d1+7v0BhbR0uIjleoIqQSRGDECdHPiXT5bofom7QlSbTyoe5OEqimKIKq/Moa+qPH0p6dEdi8GUnXdBKpZR
Zq3njDDhm1hAdDWc2oth32n9zknJyawonUlw3tt5Iat/vsI3clBdPQnSCM2hPtVzYPqSTnjqSnL33tU1WyN5KT1udKaaGgB0E4pg9k/otrfJfKdKqar+fKZUV+ltdVqodBX6umlzKmpEG4114JeorVLmr4jSLuAMfkYBIiAIYjxA4NADAKGIDHgmEMGAUMQ4wMGgRgEDEFiwDGHDAKGIMYHDAIxCBiCxIBjDhkEDEGMDxgEYhAwBIkBxxwyCPQXQaBNtatJ1bRTU9Vv5rPtCOwrhP1L0FOx7XlrkmHv9MWqs5sfu8Ozv2sQhC9B91n7nKnDazugpIt+AWvaWfum0IWqTZ0VJ6FbtZtHuqqsKu36t+xBlXQ9LMNhst3FeJVgbfyhP0GqxGAllTAwKDIRgwiGTAVcIM/ae/RkGb15+yrJCXbE1KKTmeolLYSDxUBUN3xGkuRoEs5ighNNd/fmHZPEYAPQ5ZBupABLN1TQ6Z3uX2sIwYumwYhEd5CXXXjra8fOLs1XacZdqaatGZeD7sSzc7ngwIkTi/CjF6VsQhIOmLrsw98dGPHOOm4eI2+7mJawLMiZuwsXsbjkoJRYZVJIrPinhnjUDic4SXVzFw5XkwKdfFpZ26TniEK4M6DGeFUpvemBrPfcYN6/6LiyZHM2+thE90OT2A0Wnaf9i5nH+V1FlNir2n8wEpPu4GGU6Qi855GLRfm3B44fX1ADcbF0ZCOlcmFJRo5dH/neYC6z9G+49mroNY+8HDVtQiOZtetcTNYjg8CzM/mrhZO5ErZB1F57F7imMBgEHMdSgNmykx+et7ODWO4P10RRZc3ZXf+pXxOLbsN6CFMdFB/dOlp+LD8YlKK1S1jlcEsgCc7hUp6XIt6Mh1fUCjk42tEEaiCWofQxDLezLhsphbVloUAtGkvqnqNgMXKUMyMkx3XIgKtocUw6UuNZ8qpWEywTSL8yBqKgIlLDbRMF4aB0q7w46BdnR5386Kw7sm0ag/c5IjHx2lblbfR6vQhCcvBeAysALz68dUv5sYEBOx8E9kC1tk1iBtyEYYe5eFYJa7vMwG+QY2vNmkZBXXE+DK4iCAfUz0k0t3hsCv+avYNgswrV9DyyATnELGqLiCDJ2KyQq01foZ7EaE3HxrJ99hAybSAqhmTwFi6MW4HnZMYuPdsmodqajV4EoWq2L0tntg0rcgzhWRTTMuzJFQzchPGDlOCTFejHWfu6E0F4E0oPQsKNiHSncS+yg79aTZiGMNINC2dzVGh39INuLBiNJkQOZZrGI4BwHN9bmhu13Fw5M7x9Wrd7En0IQhK4iNYLebf824FBkWWtj//hrVxTLoXbRdqvi+GDZbN43pi2UQxitcwJfsH/riSWvVx7NSkBsLHtwF+6OOoOjMzjXgYrzjZOtCYLT7xMrycIaIb65/PZYNZ2hAuHaoEcKzVn5Zb2trb8lb/N95UIgAxoFlu+lwnKiwOqRbzycJe/60UQVEbBvEtqdKtV1GVzbNbiw3AYeJVMt2JhLeT1IYiSBE9xK2bRq1rG6vv9km/k29RsaBNY+hBkpUK6VSMrZTPfNxUC/dvLbFOZ0SjbKQRUe8ZU2J2C1+Tb2wjwkYHESyeJ5+rq9WZvq7NGesP7NYC0/2f4OBZA9xnWfGgg0NlJLvAFz+N4X5AHeK0+0G4//i3kiN4+y/Px9pn1WkClM5fCnTAHfxvf83RGzDpzVW882e0lj0epTyCCyB/jJVYOlQBXclJH68xJz9OUBlyYBWsd9oM+eqIcSaVqWrpOETUu1yfuff+B44APATTJQrEHbTcTfAPaPo1tBO9rSj1NEr5wYj8sXxQxXawHJUO6ROY0H+1GQDECT0JlCV1EFgA+e1s3H7DVG/RucIxlht0u8K0MrUaw4xnfydxnj90y8zjCyR04IQvVhnEaa142TxhRGDhT21AmO/cogNnpEL/rTuHJEvdSogyCzHcD5rqF7asTQ8eSgY81BrEaGHsX04aRHRtUl
WZkU5+faW4skwNTyogfg+jGg1Vp7Dsf+edXPKneg2z5pwv3gga34pU/2XMJZBsCMTJwMj7lSm2DkBgswC7TkKDaHyfJ03mcnRX4wCGA/BV7Bu+a/Gr0YF4mdRIB+jFrMwxn8yvPgy5YyhljVNihsu4mF6we2pvNG3TEVEPl6Aed3+A3KAeEkIMQdztkxuhIcfvJj+9jy8qyJyfDpWnGb5v+ShCI92Df16HgM1AQUQSDjdALtePbinJQPkDiX1WFMIywp9+qTR1j8yk8kdhiZXUxC3JMs2mFDBoLPyzPpBYQIBPoR2iiB95ziCbnYZQSMgwf/PBw3EZDkhrsGKx6YKt1a9iRtrObUNEOZUgfAeEP+PymI5z3n/r4/q8SDErlTqiJBCDdpCW2TciHsP+h6Tu3Xe7aYhccb9iL3JAXdCqh+hAOWGjnikXvXO4W4PROWUYkkBjrsEFincWET1RcGLCDJiHE9MEKSqvIobgTnmb+poKAalMxasDZPAyAQoVFZ1eRhHVe1WprhaEbojKWwZgsL/6XLZwv43ERHhqlkFj7YnAeKtUFz6s8+5uJV6v1Ojl605qA90HoyAHxHTusSSg0Ib2tt53/PcTjlno69++vege6ugdYPHkJvq5WKIR0sf6uDjJURmaIPTl1jTZVgWQBAzjDgY/u2NESrPEYoEbjBBt59Ob97Yl/ecVP48/u7NFDk8dcOXEQq/WGaXUNDXIo/aZUu6x6Tuc/T1viGEo5yPKzfP4M6iKgAG3c/6B1VafHK+t0XlpTQiwCtAITbwvpR0mtXb5sxGN51YgWfB9nHZ6ynHOnj4kde88hs2bHXjKnxtJUAQ+mVpCDV68miMoP9XVBPcVqLPdWzgaWBxHpmAXu0gkWvy7/ieOHOgkn85y485ihSWkiUDVf1UK1yqaxw3PAEXXPchT+B49Qo81qXZXW/g0IklbR7SnHkKI9OJpcNkaAj7hMMggYBGogYAhSAxiz2yBABAxBjB8YBGIQMASJAcccMggYghgfMAjEIGAIEgOOOWQQMAQxPmAQiEHAECQGHHPIIGAIYnzAIBCDgCFIDDjmkEGg57uadMOE1d5FvdDNpSorceoFebthz7gyDUHi0KlxrB5H4zkcrIIP9MYLPzPFYRWx953GwqL1dlGOZDhSOMKRdv5e74Jzwh1X3fuYWTjDd0yPWQx2CGWxLKxgt5IvUc7mIw4BQ5A4dGodg8ejOk50NnbUR7d99FDlh+Xt/vnPuSKUtfdkUwuLqmmMjt794Yvilu9h/IRaO4WjPhP4GpJiFEMud/rhxMccfJFwUS3NN91+Q5A6TE4mKIciMZDg7ZyBPouf9Yy75ozMuVIQ7Pn+NVd9uiy9RQzrzGDJsmih6joEAMWk7dqOv1Ap57cNfPKlf3xtyc5uwSD8ASyuAdFUL/ENMwrsrJzJjXi/G9u19OOhS0rjGL05HnjCkGRDuNbtNARZB8nqHSvJAWI4fiCHQRMO7KnrAUc0zEVgHOpLMNzhFs5KgcGcGAeGHBJjUCQLGklY6hB0zEu3PC/2nvlODhN/sfzh6IyYDy7X6Fo35Hd4N156/fwPrnjj9Kn8aGWXV7HLkEERP+bqzX7IECTGA9aQwwU5tuB0TijGKrsu91YjIkMycRjnDNpmuCmAx0fjiOvKBEVhDUBcwouEXcmMb/VtB7POqOZbrI+rliAuy3jzzt4z/zm+Y+bMwH37//K5h4Z3lnZVik6Zq87GYLDZD9VVC25WkJTjqPpe2iQHnDkL11b3AvViAp9e6X/AmzU/1/SjZ8Lr69wgBtpSKmrweojBgZe8yeFn7Y1EZGJTa2lwj7d9+nj+3ae/tfPK0rw762SCDI7VR1KVzab7YwiSZHLW9ZJzJoEcqLHXOHzS1WuOr+TKmkP1/Fx3OV27vo0ksoOKKA1c7l3ywkODb3vuV2PP2ZifGWldtvXIsknOMQTZwNDKa7if0QMt/wD3HHCi5d0bXFLnrjZkUWdJG5/G8vFg2
B6Su5//9fD+0oI7B5Kwnd1tyTaWt/t7DUGSbIClIdCewTuIqK2SdL7ux/FeJHBzcmjxD5krijOZcyAIoosJIjXsZgiyATBrvIUz4K/ZtcFFPbJL3bRjCWHXXxLDlUWnOgFU3yjYZjsYgiQAygeyaH70mf/wbsrDU4JqwyrmTXwCPv1+2BAkxsLL7mOa6DEo9fch7QgSoFeGbpC3L4Jop5pWULcP5/appQ9B1EyVFu6IOf28Srg37p/QH+oUadai/fjkucUsNLs8rDhQNapbIvUiVRMJ9SFItbtTYL+gCTbtEwP2R9eQALM5s5Nj02EErOC1DLF9RhBqReXkeQX6kSNQsWmY2mc35KQHQfCYyCoUQkREcAJgFdHSYkdA1d7qA28QJT/w0B+Lr9Bb8G7GVIkXM5g1vV8S4VCP0UURyp1Sap3aSzW1MLseBCEq+6YUILMl6ycA7AS4MYod6HTKbhVhlckT0tooEgpWMvF7M4kmtpEJXr8HMxUPC122J2UCv0QP0qWWbUErmrYClLEmoDxVLA0+qPLaV2gJ9xbkWXepPgThQj6HDzsHjh9fgJR3wTu5jBqWghNYIDKMJGGIUS0MfO3sZ7VIltlMq6ha/4EgNsixOOd5ZbxxbPmmCtWFcAO/nPX9pahH7zqj1r+DGMIFlIKhpiHpUvhOKARsa0msCYjZ3KX1xWfufseiODzlyImY/vv1K9eWM/XqzTs1hV53QuyW8rtPve5Vn8D73U8BwG0wGlYr4tolKqVVu6CSRuWPZbqq4Z7+jUTviU08AcTASA0hZrzK0rPF0hz2LUfC2IvrOAgJRN4vzwciZ5dtewA/satKyYQMACTOoGxUBJhKrsnHEScJF7br8DJ8bEJvVeSw5Kd/9+XCDwAXD6YoS7JOehGEzYYQJOuK/3v4S0+8/rqnAOCt2H0AqqjFVZbhTdat5TM4dAP/bce2o0gLJ1I2TMgaTliR0r9QKi8+Xy4vkFUkS/tcEJQAIQa90qzjZLyy7QwENvqM1EPeKskBtS/sMThpHlnZYHT7xIuFB8WwJIl7DkseBzW/SHKoSyBTdDQ2hzQP6kUQag4rV0nCSHLiuuv+ezRn3Yie3geEHWzHk6CMGnTUYZTwPgasEMVi4F+14Ht/BmaQL4wotTkK6/JGnCtZzlf88lLge1wiq73kCBUnSPS0vFdeyAq7WLHtbCBsl02wOGhCFdh1HmtSFud+YNnuo1i3Oe9Lu/awxLgMGzgGeBi2sMgqnlb5wamiP/igalZRZg3JQdXYZmhAxRRPJWiFgm0dPdrQ+It2Szj1sive6DrOfXBGD0ixV1+sA7J8IsqQQ2JUf/OzU6lBI4JbjBfCHZx54i/+9Kz8Safkqidf3nNYU4c5jEBLR9TnJn0tmgSM5JjEoCLcvKPC5OCiROdcm02zv48dOqSia9Z183h5yVV4sYXRgI4ft2Hd7OWokYbVGTXq30hwnm+J+fE9ajXZyUjXZrFq5DqIKsTkpE1icDVZebTAoJsGTI2IuXyufk2sZdGiL2qZapoURuUufK49pSO/Dx5U2aJ2his1VoK21l6jRiBBeaQ3YwOqDWq5JrO6fwKdI4x5eGrZA0l/gkQgpl3LnIv4iJoZDOkVl2/M42xMJcErzh07xjo8RSVTLKoxSNadrW8Ta52oZodBIH0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAE6SFjGVHTR8AQJH3MTYk9hIAhSA8Zy4iaPgKGIOljbkrsIQQMQXrIWEbU9BEwBEkfc1NiDyFgCNJDxjKipo+AIUj6mJsSewgBQ5AeMpYRNX0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAESTAWpjjjpGp9l6hT+L/vVGurQoYgteAshAeEDOYxKSdWM
8CEz+G81LWu6In91IG6UCelG6WOdO0JBVIW0hAkAXBfus+gqj0Ph8pgmt6ejyVKB+hCnZRuCfpv9sMNzpy/ieDiJNlR4+pbV+/+D8zt/H5ofxZ7s73aNEH0gL2tMj4vwfJtX3/PY098SFl0ha6byMJ1qWoiSC2YQI5jh0Q4ubcQdyOCXIB7DYEcJTparct03a/IAdmVDkoX6ISklnnoy7us9ljCRJAEHCex1McEpur/ztW7Pwpa3AVulFELz+EytuNZwehOFs7bzqUGAkQ/riabxe9b3/XYE1+p6pYAwaY+bAiSZP4VzY9vX7PnA7gLuR2XvAS0KIEoWNvb6uoKWEnigwxY+4frEFpcLOdprOVwx7sfffxedd0K3RLz2aQnGILUYfiVNe33r7lij2c574PDvQmhYw8cbriOLLp2Cgg9j2bh4yDyj13L/8afP/rU4xRmpU5dE64HCv5/TkFf8RZsb3gAAAAASUVORK5CYII=",
"item_uom_1": "Unit",
"item_uom_2": "Unit",
"item_uom_3": "Unit",
"item_uom_4": "Unit",
"item_uom_5": "Unit",
"last_name": "Mehta",
"supplier_1": "Google",
"supplier_2": "Hetzner",
"supplier_3": "Digital Ocean",
"tax_1": "Service Tax",
"tax_rate_1": "12.5",
"timezone": "America/New_York",
"password": "password",
"email": "[email protected]",
"user_email_1": "[email protected]",
"user_fullname_1": "test setup user",
"user_sales_1": 1,
"user_purchaser_1": 1,
"user_accountant_1": 1,
"user_email_1": "[email protected]",
"user_fullname_1": "test setup user",
"user_sales_2": 1,
"user_purchaser_2": 0,
"user_accountant_2": 0
}
| agpl-3.0 |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/test/test_structseq.py | 57 | 4010 | import os
import time
import unittest
from test import support
class StructSeqTest(unittest.TestCase):
def test_tuple(self):
t = time.gmtime()
self.assertIsInstance(t, tuple)
astuple = tuple(t)
self.assertEqual(len(t), len(astuple))
self.assertEqual(t, astuple)
# Check that slicing works the same way; at one point, slicing t[i:j] with
# 0 < i < j could produce NULLs in the result.
for i in range(-len(t), len(t)):
self.assertEqual(t[i:], astuple[i:])
for j in range(-len(t), len(t)):
self.assertEqual(t[i:j], astuple[i:j])
for j in range(-len(t), len(t)):
self.assertEqual(t[:j], astuple[:j])
self.assertRaises(IndexError, t.__getitem__, -len(t)-1)
self.assertRaises(IndexError, t.__getitem__, len(t))
for i in range(-len(t), len(t)-1):
self.assertEqual(t[i], astuple[i])
def test_repr(self):
t = time.gmtime()
self.assertTrue(repr(t))
t = time.gmtime(0)
self.assertEqual(repr(t),
"time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, "
"tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)")
# os.stat() gives a complicated struct sequence.
st = os.stat(__file__)
rep = repr(st)
self.assertTrue(rep.startswith(os.name + ".stat_result"))
self.assertIn("st_mode=", rep)
self.assertIn("st_ino=", rep)
self.assertIn("st_dev=", rep)
def test_concat(self):
t1 = time.gmtime()
t2 = t1 + tuple(t1)
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
def test_repeat(self):
t1 = time.gmtime()
t2 = 3 * t1
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
self.assertEqual(t2[i], t2[i+2*len(t1)])
def test_contains(self):
t1 = time.gmtime()
for item in t1:
self.assertIn(item, t1)
self.assertNotIn(-42, t1)
def test_hash(self):
t1 = time.gmtime()
self.assertEqual(hash(t1), hash(tuple(t1)))
def test_cmp(self):
t1 = time.gmtime()
t2 = type(t1)(t1)
self.assertEqual(t1, t2)
self.assertTrue(not (t1 < t2))
self.assertTrue(t1 <= t2)
self.assertTrue(not (t1 > t2))
self.assertTrue(t1 >= t2)
self.assertTrue(not (t1 != t2))
def test_fields(self):
t = time.gmtime()
self.assertEqual(len(t), t.n_fields)
self.assertEqual(t.n_fields, t.n_sequence_fields+t.n_unnamed_fields)
def test_constructor(self):
t = time.struct_time
self.assertRaises(TypeError, t)
self.assertRaises(TypeError, t, None)
self.assertRaises(TypeError, t, "123")
self.assertRaises(TypeError, t, "123", dict={})
self.assertRaises(TypeError, t, "123456789", dict=None)
s = "123456789"
self.assertEqual("".join(t(s)), s)
def test_eviltuple(self):
class Exc(Exception):
pass
# Devious code could crash structseqs' contructors
class C:
def __getitem__(self, i):
raise Exc
def __len__(self):
return 9
self.assertRaises(Exc, time.struct_time, C())
def test_reduce(self):
t = time.gmtime()
x = t.__reduce__()
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
t = time.gmtime()
L = list(t)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(list(t[start:stop:step]),
L[start:stop:step])
def test_main():
    """Run the whole StructSeqTest suite through the regrtest harness."""
    support.run_unittest(StructSeqTest)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
dgjustice/ansible | lib/ansible/modules/cloud/ovirt/ovirt_permissions_facts.py | 13 | 4468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_permissions_facts
short_description: Retrieve facts about one or more oVirt permissions
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt permissions."
notes:
- "This module creates a new top-level C(ovirt_permissions) fact, which
contains a list of permissions."
options:
user_name:
description:
- "Username of the the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
group_name:
description:
- "Name of the the group to manage."
authz_name:
description:
- "Authorization provider of the user/group. In previous versions of oVirt known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where user/group resides."
required: false
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all permissions of user with username C(john):
- ovirt_permissions_facts:
user_name: john
authz_name: example.com-authz
- debug:
var: ovirt_permissions
'''
RETURN = '''
ovirt_permissions:
description: "List of dictionaries describing the permissions. Permission attribues are mapped to dictionary keys,
all permissions attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/permission."
returned: On success.
type: list
'''
import traceback
try:
import ovirtsdk4 as sdk
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_link_name,
ovirt_facts_full_argument_spec,
search_by_name,
)
def _permissions_service(connection, module):
    """Return the permissions service of the requested user or group.

    Looks the entity up by ``user_name`` when given, otherwise by
    ``group_name``, and raises when neither can be found.
    """
    system = connection.system_service()
    user_name = module.params['user_name']
    if user_name:
        service = system.users_service()
        name = user_name
    else:
        service = system.groups_service()
        name = module.params['group_name']

    entity = search_by_name(service, name)
    if entity is None:
        raise Exception("User/Group wasn't found.")

    return service.service(entity.id).permissions_service()
def main():
    """Module entry point: gather oVirt permission facts and exit."""
    argument_spec = ovirt_facts_full_argument_spec(
        authz_name=dict(required=True, aliases=['domain']),
        # BUG FIX: was `rdefault=None` — a typo that silently dropped the
        # intended `default` for this option.
        user_name=dict(default=None),
        group_name=dict(default=None),
        namespace=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # BUG FIX: initialize before the try block so the `finally` clause does
    # not raise NameError when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        permissions_service = _permissions_service(connection, module)
        permissions = []
        for p in permissions_service.list():
            newperm = dict()
            for key, value in p.__dict__.items():
                if value and isinstance(value, sdk.Struct):
                    # Replace SDK sub-objects with their human-readable names.
                    newperm[key[1:]] = get_link_name(connection, value)
            permissions.append(newperm)
        module.exit_json(
            changed=False,
            ansible_facts=dict(ovirt_permissions=permissions),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            connection.close(logout=False)


if __name__ == '__main__':
    main()
| gpl-3.0 |
karlcow/ymir | ymir/utils/make_post.py | 1 | 5023 | from datetime import datetime
from glob import glob
import locale
from pprint import pprint
import re
import sys
from textwrap import dedent
import mistune
from PIL import Image
# Local filesystem root of the published site; page paths resolve against it.
ROOT = '/Users/karl/Sites/la-grange.net'
# Collapses continuation-line indentation inside a metadata value.
INDENTATION = re.compile(r'\n\s{2,}')
# One `key: value` metadata header line at the start of a draft.
META = re.compile(r'^(\w+):([^\n]*)\n')
# Captures (year, month, day) from a /YYYY/MM/DD/ style path.
PATH = re.compile(r'^.*(\d{4})/(\d{2})/(\d{2})/.*')
# Skeleton of the metadata header expected at the top of a draft.
TEMPLATE = """date: {date}
prev: {prev}
title: {title}
url: {url}
style: /2019/style
"""
class GrangeRenderer(mistune.HTMLRenderer):
    """Adjusted renderer for La Grange.

    Overrides image rendering to emit <figure>/<figcaption> markup with
    explicit pixel dimensions, and suppresses the wrapping <p> around figures.
    """

    def get_img_size(self, image_path):
        """extract width and height of an image.

        Returns (width, height) in pixels; exits the program when the file
        is missing, since the build cannot proceed without it.
        """
        full_path = ROOT + image_path
        try:
            with Image.open(full_path) as im:
                return im.size
        except FileNotFoundError as e:
            print('TOFIX: Image file path incorrect')
            sys.exit(f' {e}')

    def image(self, src, alt="", title=None):
        # With a title, wrap the image in a <figure> with the title as
        # caption; otherwise emit a bare <img> tag.
        width, height = self.get_img_size(src)
        if title:
            # NOTE(review): the exact indentation inside this dedent-ed
            # literal controls the emitted HTML layout — confirm against the
            # published pages before reformatting.
            return dedent(f"""
                <figure>
                <img src="{src}"
                    alt="{alt}"
                    width="{width}" height="{height}" />
                <figcaption>{title}</figcaption>
                </figure>
                """)
        else:
            s = f'<img src="{src}" alt="{alt}" width="{width}" height="{height}" />'  # noqa
            return s

    def paragraph(self, text):
        # In case of a figure, we do not want the (non-standard) paragraph.
        # david larlet's code idea
        if text.strip().startswith("<figure>"):
            return text
        return f"<p>{text}</p>\n"
def parse(text):
    """Split leading `key: value` metadata from a Markdown document.

    :param text: text to be parsed
    :returns: a ``(metadata_dict, body)`` tuple; the body is left-stripped.
    """
    metadata = {}
    while True:
        match = META.match(text)
        if match is None:
            break
        key = match.group(1)
        raw_value = match.group(2)
        # Fold continuation-line indentation into single newlines.
        value = INDENTATION.sub('\n', raw_value.strip())
        if not value:
            sys.exit("ERROR: Some meta are missing")
        metadata[key] = value
        # Consume the matched header line and look for the next one.
        text = text[len(match.group(0)):]
    return metadata, text.lstrip()
def get_draft(entry_path):
    """Read a draft file and return its parsed ``(meta, text)`` pair.

    - meta: dict of header fields
    - text: str, the Markdown body
    Exits the program when the draft file does not exist.
    """
    try:
        with open(entry_path) as draft:
            raw = draft.read()
    except FileNotFoundError as e:
        print('TOFIX: draft file path incorrect')
        sys.exit(f' {e}')
    return parse(raw)
def add_id(html_text):
    """Post-process generated HTML to add ids to known headings."""
    # Anchor the links section so it can be referenced as #links.
    plain_heading = '<h2>sur le bord du chemin</h2>'
    anchored_heading = '<h2 id="links">sur le bord du chemin</h2>'
    return html_text.replace(plain_heading, anchored_heading)
def main():
    """Main workflow: render a Markdown draft into a published blog post."""
    locale.setlocale(locale.LC_ALL, 'fr_FR')
    import argparse
    from pathlib import Path

    parser = argparse.ArgumentParser()
    parser.add_argument("file_path", type=Path)
    args = parser.parse_args()
    draft_path = args.file_path

    # Load the HTML page template.
    template_path = f'{ROOT}/2019/12/04/article_tmpl.html'
    with open(template_path) as template_file:
        template = template_file.read()

    # Read the draft post (metadata header + Markdown body).
    meta, markdown_text = get_draft(draft_path)
    pprint(meta)
    prev_url = meta['prev']

    # Extract the previous entry's title from its rendered HTML.
    with open(ROOT + prev_url + '.html') as prev_entry:
        from bs4 import BeautifulSoup
        htmldata = BeautifulSoup(prev_entry.read(), features="lxml")
        prev_title = htmldata.find('title').text
        prev_title = prev_title.replace(' - Carnets Web de La Grange', '')

    # Publication date pieces (assumes local publishing at +09:00).
    created_timestamp = f"{meta['date']}T23:59:59+09:00"
    pub_date = datetime.fromisoformat(meta['date'])

    # Render Markdown with the site-specific renderer.
    renderer = GrangeRenderer()
    markdown = mistune.create_markdown(
        renderer=renderer, plugins=['strikethrough'], escape=False)
    html_text = markdown(markdown_text)
    # Post processing of markdown text
    html_text = add_id(html_text)

    # Values injected into the page template.
    metadata = {
        'title': meta['title'],
        'created_timestamp': created_timestamp,
        'day': pub_date.day,
        'year': pub_date.year,
        'month': f"{pub_date:%m}",
        'month_name': f"{pub_date:%B}",
        'updated_timestamp': created_timestamp,
        'updated': meta['date'],
        'prev_url': meta['prev'],
        'prev_title': prev_title,
        'post_text': html_text,
        'day_path': f"{pub_date:%d}",
        'url': meta['url'],
        'stylepath': meta['style'],
    }

    blog_post = template.format(**metadata)
    dest = ROOT + '/{year}/{month}/{day_path}/{url}.html'.format(**metadata)
    print(dest)
    with open(dest, 'w') as blogpost:
        blogpost.write(blog_post)
def extract_date(path):
    """Return the ``YYYY-MM-DD`` date encoded in a /YYYY/MM/DD/ path."""
    match = PATH.match(path)
    return '-'.join(match.groups())


if __name__ == "__main__":
    main()
| mit |
alsrgv/tensorflow | tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py | 21 | 11027 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Laplace distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import laplace
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"VectorLaplaceLinearOperator"
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorLaplaceLinearOperator(
    transformed_distribution.TransformedDistribution):
  """The vectorization of the Laplace distribution on `R^k`.

  Parameterized by a (batch of) length-`k` `loc` vector (the mean) and a
  (batch of) `k x k` `scale` `LinearOperator`, with
  `covariance = 2 * scale @ scale.T` (`@` is matrix multiplication).

  #### Mathematical Details

  The probability density function (pdf) is

  ```none
  pdf(x; loc, scale) = exp(-||y||_1) / Z,
  y = inv(scale) @ (x - loc),
  Z = 2**k |det(scale)|,
  ```

  where `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|`.

  This is a member of the location-scale family: `Y = scale @ X + loc` where
  `X` is a vector of iid unit Laplace variables. Note the marginals
  `Y_1, ..., Y_k` are *not* Laplace random variables (a sum of Laplace
  variables is not Laplace); they are linear combinations of Laplace
  variables, with tails that decay exponentially up to polynomial factors.

  #### Example

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  mu = [1., 2, 3]
  scale = tf.cholesky(cov)
  # Divide scale by sqrt(2) so that the final covariance is `cov`.
  vla = tfd.VectorLaplaceLinearOperator(
      loc=mu,
      scale=tf.linalg.LinearOperatorLowerTriangular(scale / tf.sqrt(2.)))
  vla.prob([-1., 0, 1])
  ```
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               loc=None,
               scale=None,
               validate_args=False,
               allow_nan_stats=True,
               name="VectorLaplaceLinearOperator"):
    """Construct Vector Laplace distribution on `R^k`.

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments. The `event_shape` is given by the last dimension of the matrix
    implied by `scale`; the last dimension of `loc` (if provided) must
    broadcast with this. Recall that `covariance = 2 * scale @ scale.T`.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]`
        where `b >= 0` and `k` is the event size.
      scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
        `[B1, ..., Bb, k, k]`.
      validate_args: Python `bool`, default `False`. Whether to validate input
        with asserts. If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with invalid parameters return
        NaN for that statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      ValueError: if `scale` is unspecified.
      TypeError: if not `scale.dtype.is_floating`
    """
    parameters = dict(locals())
    if scale is None:
      raise ValueError("Missing required `scale` parameter.")
    if not scale.dtype.is_floating:
      raise TypeError("`scale` parameter must have floating-point dtype.")

    with ops.name_scope(name, values=[loc] + scale.graph_parents):
      # Since expand_dims doesn't preserve constant-ness, we obtain the
      # non-dynamic value if possible.
      loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
      batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
          loc, scale)
      # Built as an affine transform of a vector of iid standard Laplace
      # variables: Y = scale @ X + loc.
      super(VectorLaplaceLinearOperator, self).__init__(
          distribution=laplace.Laplace(
              loc=array_ops.zeros([], dtype=scale.dtype),
              scale=array_ops.ones([], dtype=scale.dtype)),
          bijector=bijectors.AffineLinearOperator(
              shift=loc, scale=scale, validate_args=validate_args),
          batch_shape=batch_shape,
          event_shape=event_shape,
          validate_args=validate_args,
          name=name)
    self._parameters = parameters

  @property
  def loc(self):
    """The `loc` `Tensor` in `Y = scale @ X + loc`."""
    return self.bijector.shift

  @property
  def scale(self):
    """The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
    return self.bijector.scale

  @distribution_util.AppendDocstring(_mvn_sample_note)
  def _log_prob(self, x):
    return super(VectorLaplaceLinearOperator, self)._log_prob(x)

  @distribution_util.AppendDocstring(_mvn_sample_note)
  def _prob(self, x):
    return super(VectorLaplaceLinearOperator, self)._prob(x)

  def _mean(self):
    # The mean is `loc` broadcast to the full batch+event shape (or zeros
    # when `loc` is None).
    shape = self.batch_shape.concatenate(self.event_shape)
    has_static_shape = shape.is_fully_defined()
    if not has_static_shape:
      shape = array_ops.concat([
          self.batch_shape_tensor(),
          self.event_shape_tensor(),
      ], 0)

    if self.loc is None:
      return array_ops.zeros(shape, self.dtype)

    if has_static_shape and shape == self.loc.get_shape():
      return array_ops.identity(self.loc)

    # Add dummy tensor of zeros to broadcast. This is only necessary if shape
    # != self.loc.shape, but we could not determine if this is the case.
    return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)

  def _covariance(self):
    # Let
    #   W = (w1,...,wk), with wj ~ iid Laplace(0, 1).
    # Then this distribution is
    #   X = loc + LW,
    # and since E[X] = loc,
    #   Cov(X) = E[LW W^T L^T] = L E[W W^T] L^T.
    # Since E[wi wj] = 0 if i != j, and 2 if i == j, we have
    #   Cov(X) = 2 LL^T
    if distribution_util.is_diagonal_scale(self.scale):
      return 2. * array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
    else:
      return 2. * self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)

  def _variance(self):
    # Diagonal of the covariance, with cheaper paths for diagonal and
    # self-adjoint low-rank-update scales.
    if distribution_util.is_diagonal_scale(self.scale):
      return 2. * math_ops.square(self.scale.diag_part())
    elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
          self.scale.is_self_adjoint):
      return array_ops.matrix_diag_part(
          2. * self.scale.matmul(self.scale.to_dense()))
    else:
      return 2. * array_ops.matrix_diag_part(
          self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))

  def _stddev(self):
    # sqrt of the variance; same three structural cases as _variance.
    if distribution_util.is_diagonal_scale(self.scale):
      return np.sqrt(2) * math_ops.abs(self.scale.diag_part())
    elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
          self.scale.is_self_adjoint):
      return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
          self.scale.matmul(self.scale.to_dense())))
    else:
      return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
          self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))

  def _mode(self):
    # The density is maximized at the location parameter, i.e. the mean.
    return self._mean()
| apache-2.0 |
xsynergy510x/android_external_chromium_org | tools/telemetry/telemetry/page/page_set_archive_info.py | 26 | 6409 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import re
import shutil
import tempfile
from telemetry.util import cloud_storage
class PageSetArchiveInfo(object):
  """Maps pages of a page set to the WPR archive files that record them.

  The mapping is persisted in a JSON metadata file that lives next to the
  .wpr archives.
  """

  def __init__(self, file_path, data, ignore_archive=False):
    # file_path: path of the JSON metadata file; the archives live in the
    # same directory.
    self._file_path = file_path
    self._base_dir = os.path.dirname(file_path)

    # Ensure directory exists.
    if not os.path.exists(self._base_dir):
      os.makedirs(self._base_dir)

    # Download all .wpr files.
    if not ignore_archive:
      for archive_path in data['archives']:
        archive_path = self._WprFileNameToPath(archive_path)
        try:
          cloud_storage.GetIfChanged(archive_path)
        except (cloud_storage.CredentialsError, cloud_storage.PermissionError):
          if os.path.exists(archive_path):
            # If the archive exists, assume the user recorded their own and
            # simply warn.
            logging.warning('Need credentials to update WPR archive: %s',
                            archive_path)

    # Map from the relative path (as it appears in the metadata file) of the
    # .wpr file to a list of page names it supports.
    self._wpr_file_to_page_names = data['archives']

    # Map from the page name to a relative path (as it appears in the metadata
    # file) of the .wpr file.
    self._page_name_to_wpr_file = dict()
    # Find out the wpr file names for each page.
    for wpr_file in data['archives']:
      page_names = data['archives'][wpr_file]
      for page_name in page_names:
        self._page_name_to_wpr_file[page_name] = wpr_file

    # Path of an in-progress recording; when set it takes priority over the
    # persisted mapping (see WprFilePathForPage).
    self.temp_target_wpr_file_path = None

  @classmethod
  def FromFile(cls, file_path, ignore_archive=False):
    """Loads the metadata file, or creates an empty info object if absent."""
    if os.path.exists(file_path):
      with open(file_path, 'r') as f:
        data = json.load(f)
        return cls(file_path, data, ignore_archive=ignore_archive)
    return cls(file_path, {'archives': {}}, ignore_archive=ignore_archive)

  def WprFilePathForPage(self, page):
    """Returns the absolute archive path for |page|, or None if unknown."""
    if self.temp_target_wpr_file_path:
      return self.temp_target_wpr_file_path
    wpr_file = self._page_name_to_wpr_file.get(page.display_name, None)
    if wpr_file is None:
      # Some old page sets always use the URL to identify a page rather than
      # the display_name, so try to look for that.
      wpr_file = self._page_name_to_wpr_file.get(page.url, None)
    if wpr_file:
      return self._WprFileNameToPath(wpr_file)
    return None

  def AddNewTemporaryRecording(self, temp_wpr_file_path=None):
    """Registers (creating one if needed) a temp file for a new recording."""
    if temp_wpr_file_path is None:
      temp_wpr_file_handle, temp_wpr_file_path = tempfile.mkstemp()
      os.close(temp_wpr_file_handle)
    self.temp_target_wpr_file_path = temp_wpr_file_path

  def AddRecordedPages(self, pages):
    """Commits the temporary recording as the archive for |pages|."""
    if not pages:
      os.remove(self.temp_target_wpr_file_path)
      return

    (target_wpr_file, target_wpr_file_path) = self._NextWprFileName()
    for page in pages:
      self._SetWprFileForPage(page.display_name, target_wpr_file)
    shutil.move(self.temp_target_wpr_file_path, target_wpr_file_path)

    # Update the hash file.
    with open(target_wpr_file_path + '.sha1', 'wb') as f:
      f.write(cloud_storage.CalculateHash(target_wpr_file_path))
      f.flush()

    self._WriteToFile()
    self._DeleteAbandonedWprFiles()

  def _DeleteAbandonedWprFiles(self):
    """Drops wpr files that no longer record any page, and deletes them."""
    # Update the metadata so that the abandoned wpr files don't have empty
    # page name arrays.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del self._wpr_file_to_page_names[wpr_file]
      # Don't fail if we're unable to delete some of the files.
      wpr_file_path = self._WprFileNameToPath(wpr_file)
      try:
        os.remove(wpr_file_path)
      except Exception:
        logging.warning('Failed to delete file: %s' % wpr_file_path)

  def _AbandonedWprFiles(self):
    """Returns wpr file names whose page-name lists are empty."""
    abandoned_wpr_files = []
    for wpr_file, page_names in self._wpr_file_to_page_names.iteritems():
      if not page_names:
        abandoned_wpr_files.append(wpr_file)
    return abandoned_wpr_files

  def _WriteToFile(self):
    """Writes the metadata into the file passed as constructor parameter."""
    metadata = dict()
    metadata['description'] = (
        'Describes the Web Page Replay archives for a page set. Don\'t edit by '
        'hand! Use record_wpr for updating.')
    metadata['archives'] = self._wpr_file_to_page_names.copy()
    # Don't write data for abandoned archives.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del metadata['archives'][wpr_file]

    with open(self._file_path, 'w') as f:
      json.dump(metadata, f, indent=4)
      f.flush()

  def _WprFileNameToPath(self, wpr_file):
    """Resolves a metadata-relative wpr file name to an absolute path."""
    return os.path.abspath(os.path.join(self._base_dir, wpr_file))

  def _NextWprFileName(self):
    """Creates a new file name for a wpr archive file."""
    # The names are of the format "some_thing_number.wpr". Read the numbers.
    highest_number = -1
    base = None
    for wpr_file in self._wpr_file_to_page_names:
      match = re.match(r'(?P<BASE>.*)_(?P<NUMBER>[0-9]+)\.wpr', wpr_file)
      if not match:
        raise Exception('Illegal wpr file name ' + wpr_file)
      highest_number = max(int(match.groupdict()['NUMBER']), highest_number)
      if base and match.groupdict()['BASE'] != base:
        raise Exception('Illegal wpr file name ' + wpr_file +
                        ', doesn\'t begin with ' + base)
      base = match.groupdict()['BASE']
    if not base:
      # If we're creating a completely new info file, use the base name of the
      # page set file.
      base = os.path.splitext(os.path.basename(self._file_path))[0]
    new_filename = '%s_%03d.wpr' % (base, highest_number + 1)
    return new_filename, self._WprFileNameToPath(new_filename)

  def _SetWprFileForPage(self, page_name, wpr_file):
    """For modifying the metadata when we're going to record a new archive."""
    old_wpr_file = self._page_name_to_wpr_file.get(page_name, None)
    if old_wpr_file:
      self._wpr_file_to_page_names[old_wpr_file].remove(page_name)
    self._page_name_to_wpr_file[page_name] = wpr_file
    if wpr_file not in self._wpr_file_to_page_names:
      self._wpr_file_to_page_names[wpr_file] = []
    self._wpr_file_to_page_names[wpr_file].append(page_name)
| bsd-3-clause |
kzky/python-online-machine-learning-library | pa/passive_aggressive_2.py | 1 | 6472 | import numpy as np
import scipy as sp
import logging as logger
import time
import pylab as pl
from collections import defaultdict
from sklearn.metrics import confusion_matrix
class PassiveAggressiveII(object):
"""
Passive Aggressive-II algorithm: squared hinge loss PA.
References:
- http://jmlr.org/papers/volume7/crammer06a/crammer06a.pdf
This model is only applied to binary classification.
"""
def __init__(self, fname, delimiter = " ", C = 1, n_scan = 10):
"""
model initialization.
"""
logger.basicConfig(level=logger.DEBUG)
logger.info("init starts")
self.n_scan = 10
self.data = defaultdict()
self.model = defaultdict()
self.cache = defaultdict()
self._load(fname, delimiter)
self._init_model(C)
logger.info("init finished")
def _load(self, fname, delimiter = " "):
"""
Load data set specified with filename.
data format must be as follows (space-separated file as default),
l_1 x_11 x_12 x_13 ... x_1m
l_2 x_21 x_22 ... x_2m
...
l_n x_n1 x_n2 ... x_nm
l_i must be {1, -1} because of binary classifier.
Arguments:
- `fname`: file name.
- `delimiter`: delimiter of a file.
"""
logger.info("load data starts")
# load data
self.data["data"] = np.loadtxt(fname, delimiter = delimiter)
self.data["n_sample"] = self.data["data"].shape[0]
self.data["f_dim"] = self.data["data"].shape[1] - 1
# binalize
self._binalize(self.data["data"])
# normalize
self.normalize(self.data["data"][:, 1:])
logger.info("load data finished")
def _binalize(self, data):
"""
Binalize label of data.
Arguments:
- `data`: dataset.
"""
logger.info("init starts")
# binary check
labels = data[:, 0]
classes = np.unique(labels)
if classes.size != 2:
print "label must be a binary value."
exit(1)
# convert binary lables to {1, -1}
for i in xrange(labels.size):
if labels[i] == classes[0]:
labels[i] = 1
else:
labels[i] = -1
# set classes
self.data["classes"] = classes
logger.info("init finished")
def normalize(self, samples):
"""
nomalize sample, such that sqrt(x^2) = 1
Arguments:
- `samples`: dataset without labels.
"""
logger.info("normalize starts")
for i in xrange(0, self.data["n_sample"]):
samples[i, :] = self._normalize(samples[i, :])
logger.info("normalize finished")
def _normalize(self, sample):
norm = np.sqrt(sample.dot(sample))
sample = sample/norm
return sample
def _init_model(self, C):
"""
Initialize model.
"""
logger.info("init model starts")
self.model["w"] = np.ndarray(self.data["f_dim"] + 1) # model paremter
self.model["C"] = C # aggressive parameter
logger.info("init model finished")
def _learn(self, ):
"""
Learn internally.
"""
def _update(self, label, sample, margin):
"""
Update model parameter internally.
update rule is as follows,
w = w + y (1 - m)/(||x||_2^2 + C) * x
Arguments:
- `label`: label = {1, -1}
- `sample`: sample, or feature vector
"""
# add bias
sample = self._add_bias(sample)
norm = sample.dot(sample)
w = self.model["w"] + label * (1 - margin)/(norm + self.model["C"]) * sample
self.model["w"] = w
def _predict_value(self, sample):
"""
predict value of \w^T * x
Arguments:
- `sample`:
"""
return self.model["w"].dot(self._add_bias(sample))
def _add_bias(self, sample):
return np.hstack((sample, 1))
def learn(self, ):
"""
Learn.
"""
logger.info("learn starts")
data = self.data["data"]
# learn
for i in xrange(0, self.n_scan):
for i in xrange(0, self.data["n_sample"]):
sample = data[i, 1:]
label = data[i, 0]
pred_val = self._predict_value(sample)
margin = label * pred_val
if margin < 1:
self._update(label, sample, margin)
logger.info("learn finished")
def predict(self, sample):
"""
predict {1, -1} base on \w^T * x
Arguments:
- `sample`:
"""
pred_val = self._predict_value(sample)
self.cache["pred_val"] = pred_val
if pred_val >=0:
return 1
else:
return -1
def update(self, label, sample):
"""
update model.
Arguments:
- `sample`: sample, or feature vector
- `pred_val`: predicted value i.e., w^T * sample
"""
margin = label * self.model["pred_val"]
if margin < 1:
_update(label, sample, margin)
@classmethod
def examplify(cls, fname, delimiter = " ", C = 1 , n_scan = 3):
"""
Example of how to use
"""
# learn
st = time.time()
model = PassiveAggressiveII(fname, delimiter, C , n_scan)
model.learn()
et = time.time()
print "learning time: %f[s]" % (et - st)
# predict (after learning)
data = np.loadtxt(fname, delimiter = " ")
model._binalize(data)
n_sample = data.shape[0]
y_label = data[:, 0]
y_pred = np.ndarray(n_sample)
for i in xrange(0, n_sample):
sample = data[i, 1:]
y_pred[i] = model.predict(sample)
# show result
cm = confusion_matrix(y_label, y_pred)
print cm
print "accurary: %d [%%]" % (np.sum(cm.diagonal()) * 100.0/np.sum(cm))
if __name__ == '__main__':
    # Example run on a UCI dataset; see PassiveAggressiveII.examplify.
    fname = "/home/kzk/datasets/uci_csv/liver.csv"
    #fname = "/home/kzk/datasets/uci_csv/ad.csv"
    print "dataset is", fname
    PassiveAggressiveII.examplify(fname, delimiter = " ", C = 1, n_scan = 100)
| bsd-3-clause |
divergentdave/inspectors-general | inspectors/nea.py | 2 | 6711 | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from utils import utils, inspector, admin
# http://arts.gov/oig
# Earliest year covered by the scraped listings.
archive = 2005

# options:
#   standard since/year options for a year range to fetch from.
#   report_id: only bother to process a single report
#
# Notes for IG's web team:
# - Fix MISSING_IDS

# Landing pages scraped by run(), one per report type.
AUDIT_REPORTS_URL = "http://arts.gov/oig/reports/audits"
SPECIAL_REVIEWS_URL = "http://arts.gov/oig/reports/specials"
SEMIANNUAL_REPORTS_URL = "http://arts.gov/oig/reports/semi-annual"
PEER_REVIEWS_URL = "http://arts.gov/oig/reports/external-peer-reviews"
FISMA_REPORTS_URL = "http://arts.gov/inspector-general/reports/internal-reviews"

# report type -> listing URL; iterated (sorted) in run().
REPORT_URLS = {
  "audit": AUDIT_REPORTS_URL,
  "evaluation": SPECIAL_REVIEWS_URL,
  "semiannual_report": SEMIANNUAL_REPORTS_URL,
  "peer_review": PEER_REVIEWS_URL,
  "fisma": FISMA_REPORTS_URL,
}

# Report ids whose files are not published; marked unreleased/missing.
MISSING_IDS = [
  "EA-perimeter-security-test-reload",
]

# Hardcoded publication dates for reports whose listing rows and titles
# carry no parseable date (fallback used by report_from).
REPORT_PUBLISHED_MAP = {
  "2013-Peer-Review": datetime.datetime(2013, 12, 13),
  "2010-Peer-Review": datetime.datetime(2010, 8, 30),
  "2007-Peer-Review": datetime.datetime(2007, 3, 28),
  "mississippi-limited-audit-revised": datetime.datetime(2015, 11, 3),
  "maaf-final-report": datetime.datetime(2015, 5, 6),
  "louisiana-final-audit": datetime.datetime(2014, 12, 22),
  "DCCAH-Final-Report": datetime.datetime(2013, 9, 23),
  "MN-State-Arts-Board-LSA": datetime.datetime(2013, 3, 15),
  "MTG-LS-redacted": datetime.datetime(2013, 3, 1),
  "AMW-LSA-Final-Report": datetime.datetime(2013, 1, 11),
  "APAP-LSA-Report-080312": datetime.datetime(2012, 8, 3),
  "Illinois-Arts-Council-Report": datetime.datetime(2012, 4, 4),
  "American-Samoa": datetime.datetime(2011, 7, 15),
  "MSAC_Report_1": datetime.datetime(2011, 7, 25),
  "Family-Resources-Evaluation-Report": datetime.datetime(2009, 10, 30),
  "Virginia-Commission": datetime.datetime(2009, 8, 12),
  "Wisconsin-Arts-Board-Final-Report": datetime.datetime(2009, 6, 15),
  "PCA-Final-Report_0": datetime.datetime(2009, 4, 3),
  "hrac-final-debarment-report-5-13-2015": datetime.datetime(2015, 5, 13),
  "northwest-heritage-resources-final-report": datetime.datetime(2014, 11, 19),
  "2015-confluences-final-report": datetime.datetime(2014, 10, 20),
  "State-Education-Agency-DIrectors-SCE-07-14": datetime.datetime(2014, 7, 16),
  "Academy-of-American-Poets-SCE-7-14": datetime.datetime(2014, 7, 10),
  "Lincoln-Center-Final-SCE": datetime.datetime(2014, 5, 28),
  "American-Documentary-SCE-14-02": datetime.datetime(2014, 5, 19),
  "BRIC-Arts-SCE-3-25-14": datetime.datetime(2014, 3, 25),
  "Philadelphia-Orchestra-Association": datetime.datetime(2013, 3, 27),
  "Greater-Philadelphia-Alliance": datetime.datetime(2013, 2, 7),
  "FA-Report-NFT-Redacted": datetime.datetime(2013, 8, 28),
  "mtg-report-disposition-closeout-11-14": datetime.datetime(2013, 6, 5),
  "AFTA": datetime.datetime(2012, 9, 4),
  "SAH": datetime.datetime(2012, 7, 9),
  "APAP-Evaluation": datetime.datetime(2012, 6, 20),
  "DCASE": datetime.datetime(2012, 5, 1),
  "NBM": datetime.datetime(2011, 10, 24),
  "BSO": datetime.datetime(2011, 9, 7),
  "DSOHSCE": datetime.datetime(2010, 8, 5),
  "Mosaic": datetime.datetime(2010, 4, 30),
  "UMS": datetime.datetime(2010, 1, 28),
  "gulf-coast-youth-choirs": datetime.datetime(2009, 9, 30),
  "michigan-opera-theater": datetime.datetime(2009, 9, 30),
  "Florida-Orchestra-Report": datetime.datetime(2009, 9, 28),
  "artsandculturalaffairsweb": datetime.datetime(2009, 9, 23),
  "Sphinx-Organization": datetime.datetime(2009, 9, 23),
  "VirginIslandEvaluationReport": datetime.datetime(2009, 3, 25),
  "WoodlandPatternEvaluationReport": datetime.datetime(2008, 10, 8),
  "VSAEvaluationReport": datetime.datetime(2008, 10, 7),
  "TricklockEvaluationReport": datetime.datetime(2008, 10, 6),
  "LosReyesEvaluationReport": datetime.datetime(2008, 10, 2),
  "MusicTheatreGroup-Redacted-2008": datetime.datetime(2007, 11, 21),
  "LS-16-02-NASAA-Final-Report": datetime.datetime(2016, 2, 29),
  "Letter-of-Comment-NEA-01-27-17": datetime.datetime(2017, 1, 27),
  "Art-21-Report-SCE-17-01": datetime.datetime(2017, 7, 27),
}
def run(options):
    """Scrape every NEA OIG listing page and save reports in the
    requested year range.

    Honors the `report_id` option to restrict processing to one report.
    """
    year_range = inspector.year_range(options, archive)
    only_report_id = options.get('report_id')

    # Pull the reports, one listing page per report type.
    for report_type, url in sorted(REPORT_URLS.items()):
        doc = utils.beautifulsoup_from_url(url)
        # Some listings use <li> rows, others a table.
        results = doc.select("div.field-item li") or doc.select("div.field-item tr")
        if not results:
            raise inspector.NoReportsFoundError("National Endowment for the Arts (%s)" % report_type)
        for result in results:
            report = report_from(result, url, report_type, year_range)
            if not report:
                continue
            # debugging convenience: can limit to single report
            if only_report_id and report['report_id'] != only_report_id:
                continue
            inspector.save_report(report)
def report_from(result, landing_url, report_type, year_range):
    """Build a report dict from one listing row.

    Returns None for rows without a link, reports outside *year_range*,
    or reports whose publication date cannot be determined.
    """
    link = result.find("a")
    if not link:
        return
    title = link.text
    report_url = urljoin(landing_url, link.get('href'))
    report_filename = report_url.split("/")[-1]
    report_id, _ = os.path.splitext(report_filename)

    published_on = None
    # Try the date cell with both two- and four-digit year formats
    # (strptime's %y and %Q match exactly 2 and 4 digits respectively,
    # so at most one format can succeed for a given cell).
    for date_format in ('%m/%d/%y', '%m/%d/%Y'):
        try:
            published_on_text = result.select("td")[1].text.strip()
            published_on = datetime.datetime.strptime(published_on_text, date_format)
            break
        except (ValueError, IndexError):
            continue
    if not published_on:
        # Fall back to a date trailing the title after "-" or an en dash.
        try:
            published_on_text = title.split("-")[-1].split("–")[-1].strip()
            published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
        except ValueError:
            pass
    if not published_on:
        # Last resort: the hand-maintained date map.
        published_on = REPORT_PUBLISHED_MAP.get(report_id)
    if not published_on:
        admin.log_no_date("nea", report_id, title, report_url)
        return

    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return

    report = {
        'inspector': 'nea',
        'inspector_url': 'http://arts.gov/oig',
        'agency': 'nea',
        'agency_name': 'National Endowment for the Arts',
        'type': report_type,
        'landing_url': landing_url,
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    if report_id in MISSING_IDS:
        # Known-unpublished report: keep the metadata, drop the URL.
        report['unreleased'] = True
        report['missing'] = True
        report['url'] = None
    return report
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 |
kiith-sa/QGIS | python/plugins/processing/admintools/httplib2/__init__.py | 43 | 70449 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : November 2006
Copyright : (C) 2012 by Joe Gregorio
Email : joe at bitworking dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.6"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    ssl_SSLError = ssl.SSLError
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        """Wrap *sock* in SSL using the stdlib ssl module (Python 2.6+)."""
        if disable_validation:
            cert_reqs = ssl.CERT_NONE
        else:
            cert_reqs = ssl.CERT_REQUIRED
        # We should be specifying SSL version 3 or TLS v1, but the ssl module
        # doesn't expose the necessary knobs. So we need to go with the default
        # of SSLv23.
        return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
                               cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
    # Fallback for Pythons without the ssl module: certificate validation
    # is unsupported here, so callers must explicitly disable it.
    ssl_SSLError = None
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        """Wrap *sock* with the legacy socket.ssl API (no validation)."""
        if not disable_validation:
            raise CertificateValidationUnsupported(
                "SSL certificate validation is not supported without "
                "the ssl module installed. To avoid this error, install "
                "the ssl module, or explicity disable validation.")
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    # Pre-2.3 fallback: pass IRIs through unchanged.
    def iri2uri(uri):
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is a real timeout value, i.e. neither
    None nor the socket module's global-default sentinel."""
    if timeout is None:
        return False
    default_sentinel = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', object())
    return timeout is not default_sentinel
# Public API for `from httplib2 import *`.
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
           'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
           'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
           'debuglevel', 'ProxiesUnavailableError']

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0

# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support: emulate the 2.4+ sorted() builtin.
if sys.version_info < (2,4):
    def sorted(seq):
        # Sorts in place and returns the same list (unlike the 2.4+
        # builtin, which returns a new list; callers here only iterate).
        seq.sort()
        return seq

# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()

if not hasattr(httplib.HTTPResponse, 'getheaders'):
    # Monkey-patch getheaders onto pre-2.4 HTTPResponse objects.
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        # Keep the failed response/content so callers may inspect them
        # or convert them back into a normal (response, content) pair.
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass

class CertificateHostnameMismatch(SSLHandshakeError):
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        # Hostname we connected to and the certificate that failed to match it.
        self.host = host
        self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5

# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
    os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")

# Which headers are hop-by-hop headers by default
# (cf. RFC 2616 section 13.5.1; may be extended by a Connection header).
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
    """Return the names of headers in *response* that are end-to-end,
    i.e. not in HOP_BY_HOP and not named by the Connection header."""
    excluded = list(HOP_BY_HOP)
    excluded.extend(name.strip() for name in response.get('connection', '').split(','))
    return [header for header in response.keys() if header not in excluded]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    """
    # Groups 1/3/4/6/8 hold the capture inside each optional wrapper.
    scheme, _, authority, path, _, query, _, fragment = URI.match(uri).groups()[1:]
    return (scheme, authority, path, query, fragment)
def urlnorm(uri):
    """Normalize *uri* for use as a cache key.

    Lowercases the scheme and authority, defaults an empty path to "/",
    and drops the fragment. Returns (scheme, authority, request_uri,
    defrag_uri). Raises RelativeURIError when scheme or authority is
    missing.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # FIX: the original lowercased `scheme` twice (before and after
    # building request_uri); once is enough.
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')

def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    try:
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                # Python 2 byte string: decode before IDNA-encoding.
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    if isinstance(filename,unicode):
        # NOTE: `unicode` exists only on Python 2; this module is Python 2 code.
        filename=filename.encode('utf-8')
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)

    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    # Appending the md5 keeps names unique even after truncation/stripping.
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
    """Returns a dictionary of dictionaries, one dict
    per auth_scheme."""
    retval = {}
    if headers.has_key(headername):
        try:
            authenticate = headers[headername].strip()
            www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
            while authenticate:
                # Break off the scheme at the beginning of the line
                if headername == 'authentication-info':
                    # Authentication-Info carries no scheme name; its
                    # parameters are implicitly Digest parameters.
                    (auth_scheme, the_rest) = ('digest', authenticate)
                else:
                    (auth_scheme, the_rest) = authenticate.split(" ", 1)
                # Now loop over all the key value pairs that come after the scheme,
                # being careful not to roll into the next scheme
                match = www_auth.search(the_rest)
                auth_params = {}
                while match:
                    if match and len(match.groups()) == 3:
                        (key, value, the_rest) = match.groups()
                        # Unescape backslash pairs inside quoted-strings.
                        auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
                    match = www_auth.search(the_rest)
                retval[auth_scheme.lower()] = auth_params
                authenticate = the_rest.strip()
        except ValueError:
            raise MalformedHeader("WWW-Authenticate")
    return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns one of "FRESH", "STALE" or "TRANSPARENT".

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Note that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif cc.has_key('no-cache'):
        retval = "TRANSPARENT"
    elif cc_response.has_key('no-cache'):
        retval = "STALE"
    elif cc.has_key('only-if-cached'):
        retval = "FRESH"
    elif response_headers.has_key('date'):
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if cc_response.has_key('max-age'):
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif response_headers.has_key('expires'):
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's max-age overrides the response's lifetime.
        if cc.has_key('max-age'):
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        # min-fresh: the client wants the entry to stay fresh this much longer.
        if cc.has_key('min-fresh'):
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Decompress a gzip- or deflate-encoded body.

    On success, fixes content-length, renames content-encoding to
    -content-encoding and returns the decompressed bytes; raises
    FailedToDecompressContent when decompression fails.
    """
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way the won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except IOError:
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or purge) a response in *cache* under *cachekey*.

    Honors Cache-Control: no-store from either side, and annotates the
    cached entry with '-varied-*' headers so Vary can be checked on
    later lookups.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            status = response_headers.status
            if status == 304:
                # Store a revalidated entry as a plain 200.
                status = 200

            status_header = 'status: %d\r\n' % status

            header_str = info.as_string()
            # Normalize bare CR or bare LF to CRLF in the serialized headers.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
def _cnonce():
    """Generate a 16-character hex client nonce for Digest auth.

    NOTE(review): uses the non-cryptographic `random` module; acceptable
    for a nonce, but not for secrets.
    """
    # FIX: randrange(0, 10) — the original used randrange(0, 9), whose
    # exclusive upper bound meant the digit '9' could never be chosen.
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 10)] for i in range(20)])).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    # WSSE PasswordDigest: base64(sha1(nonce + creation timestamp + password)).
    return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class for one authenticated (host, path) scope.

    Subclasses implement request() to attach credentials to outgoing
    requests and may override response() to react to server challenges.
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        # Number of path segments of request_uri below this scope's root.
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Over-ride this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Over-ride this in sub-classes if necessary.

        Return TRUE if the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """HTTP Basic auth: credentials sent base64-encoded on every request."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = "username:realm:password"; hashed per-request in request().
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest  = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        if self.challenge.get('opaque'):
            headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
        # The nonce-count increments with each request under the same nonce.
        self.challenge['nc'] += 1

    def response(self, response, content):
        if not response.has_key('authentication-info'):
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                # Stale nonce: adopt the fresh one and signal a retry.
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer ([email protected])"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Derived key: hash of "username:H(password + salt):realm".
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The digest covers method, URI, nonces and every end-to-end header value.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            # Server requests re-authentication; signal the caller to retry.
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'WSSE profile="UsernameToken"'
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        # PasswordDigest = base64(sha1(cnonce + timestamp + password)).
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                password_digest,
                cnonce,
                iso_now)
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin: exchanges credentials for an Auth token at
    construction time, then sends it on every request."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and  request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login rejected: keep an empty token (requests go out unauthenticated).
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Registry mapping lowercased scheme name -> handler class.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Order in which schemes are tried when a server offers several.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        """*cache* is the directory path; *safe* maps a key to a filename."""
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for *key*, or None if absent/unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # FIX: open() instead of the Python-2-only file() builtin, and
            # close the handle even if read() fails (resource leak).
            f = open(cacheFullPath, "rb")
            try:
                retval = f.read()
            finally:
                f.close()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store *value* for *key*, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        f = open(cacheFullPath, "wb")
        try:
            f.write(value)
        finally:
            f.close()

    def delete(self, key):
        """Remove the entry for *key*; no-op when it does not exist."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A small store of (name, password) pairs, optionally scoped by domain.

    Entries registered with an empty domain apply to every host.
    """

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a name/password pair for the given domain ("" = any host)."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every stored credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield the (name, password) pairs applicable to the given domain."""
        for stored_domain, name, password in self.credentials:
            if stored_domain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""
    # Inherits add()/clear()/iter(); stored tuples are (domain, key, cert).
    pass
class AllHosts(object):
    """Sentinel for ProxyInfo.bypass_hosts meaning 'bypass the proxy for
    every host' (set when no_proxy=*)."""
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""

    # Hosts for which the proxy should be skipped; () = none,
    # AllHosts = everything (see bypass_host()).
    bypass_hosts = ()

    def __init__(self, proxy_type, proxy_host, proxy_port,
                 proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
            proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        # When true, DNS resolution is delegated to the proxy.
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return the settings as (type, host, port, rdns, user, pass)."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """True when both a host and a port are configured."""
        return (self.proxy_host != None) and (self.proxy_port != None)

    @classmethod
    def from_environment(cls, method='http'):
        """
        Read proxy info from the environment variables.
        """
        if method not in ['http', 'https']:
            return
        env_var = method + '_proxy'
        # The lower-case variable takes precedence over the upper-case one.
        url = os.environ.get(env_var, os.environ.get(env_var.upper()))
        if not url:
            return
        pi = cls.from_url(url, method)

        no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
        bypass_hosts = []
        if no_proxy:
            bypass_hosts = no_proxy.split(',')
        # special case, no_proxy=* means all hosts bypassed
        if no_proxy == '*':
            bypass_hosts = AllHosts

        pi.bypass_hosts = bypass_hosts
        return pi

    @classmethod
    def from_url(cls, url, method='http'):
        """
        Construct a ProxyInfo from a URL (such as http_proxy env var)
        """
        url = urlparse.urlparse(url)
        username = None
        password = None
        port = None
        if '@' in url[1]:
            ident, host_port = url[1].split('@', 1)
            if ':' in ident:
                username, password = ident.split(':', 1)
            else:
                # NOTE(review): a lone "user@host" ends up stored as the
                # password, with no username -- confirm this matches the
                # intended upstream behavior.
                password = ident
        else:
            host_port = url[1]
        if ':' in host_port:
            host, port = host_port.split(':', 1)
        else:
            host = host_port
        if port:
            port = int(port)
        else:
            # Default port follows the scheme being proxied.
            port = dict(https=443, http=80)[method]
        proxy_type = 3 # socks.PROXY_TYPE_HTTP
        return cls(
            proxy_type = proxy_type,
            proxy_host = host,
            proxy_port = port,
            proxy_user = username or None,
            proxy_pass = password or None,
        )

    def applies_to(self, hostname):
        # The proxy applies unless the host is on the bypass list.
        return not self.bypass_host(hostname)

    def bypass_host(self, hostname):
        """Has this host been excluded from the proxy config"""
        if self.bypass_hosts is AllHosts:
            return True

        bypass = False
        for domain in self.bypass_hosts:
            # NOTE(review): plain suffix match, so "example.com" also
            # bypasses "badexample.com" -- verify that is acceptable.
            if hostname.endswith(domain):
                bypass = True

        return bypass
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """
    HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        # Optional ProxyInfo instance; None disables proxying.
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                'Proxy support missing but proxy use was requested!')
        msg = "getaddrinfo returns an empty list"
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
        else:
            use_proxy = False
        if use_proxy and proxy_rdns:
            # Resolve the proxy's address; the proxy resolves the target.
            host = proxy_host
            port = proxy_port
        else:
            host = self.host
            port = self.port

        # Try each address returned by getaddrinfo until one connects.
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if use_proxy:
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                    # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s) ************" % (self.host, self.port)
                    if use_proxy:
                        print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
                self.sock.connect((self.host, self.port) + sa[2:])
            except socket.error, msg:
                if self.debuglevel > 0:
                    print "connect fail: (%s, %s)" % (self.host, self.port)
                    if use_proxy:
                        print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; re-raise the last error.
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
else:
use_proxy = False
if use_proxy and proxy_rdns:
host = proxy_host
port = proxy_port
else:
host = self.host
port = self.port
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
host, port, 0, socket.SOCK_STREAM):
try:
if use_proxy:
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
# Default map from URI scheme to the connection class used for it.
# May be replaced below with App Engine specific classes.
SCHEME_TO_CONNECTION = {
    'http': HTTPConnectionWithTimeout,
    'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
    from google.appengine.api import apiproxy_stub_map
    if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
        raise ImportError  # Bail out; we're not actually running on App Engine.
    from google.appengine.api.urlfetch import fetch
    from google.appengine.api.urlfetch import InvalidURLError
    from google.appengine.api.urlfetch import DownloadError
    from google.appengine.api.urlfetch import ResponseTooLargeError
    from google.appengine.api.urlfetch import SSLCertificateError

    class ResponseDict(dict):
        """Is a dictionary that also has a read() method, so
        that it can pass itself off as an httplib.HTTPResponse()."""
        def read(self):
            pass

    class AppEngineHttpConnection(object):
        """Emulates an httplib.HTTPConnection object, but actually uses the Google
        App Engine urlfetch library. This allows the timeout to be properly used on
        Google App Engine, and avoids using httplib, which on Google App Engine is
        just another wrapper around urlfetch.
        """
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_ssl_certificate_validation=False):
            self.host = host
            self.port = port
            self.timeout = timeout
            # urlfetch supports none of these features; fail loudly.
            if key_file or cert_file or proxy_info or ca_certs:
                raise NotSupportedOnThisPlatform()
            self.response = None
            self.scheme = 'http'
            self.validate_certificate = not disable_ssl_certificate_validation
            # Callers check .sock before use; pretend we are always connected.
            self.sock = True

        def request(self, method, url, body, headers):
            # Calculate the absolute URI, which fetch requires
            netloc = self.host
            if self.port:
                netloc = '%s:%s' % (self.host, self.port)
            absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
            try:
                try: # 'body' can be a stream.
                    body = body.read()
                except AttributeError:
                    pass
                response = fetch(absolute_uri, payload=body, method=method,
                    headers=headers, allow_truncated=False, follow_redirects=False,
                    deadline=self.timeout,
                    validate_certificate=self.validate_certificate)
                self.response = ResponseDict(response.headers)
                self.response['status'] = str(response.status_code)
                self.response['reason'] = httplib.responses.get(response.status_code, 'Ok')
                self.response.status = response.status_code
                setattr(self.response, 'read', lambda : response.content)
            # Make sure the exceptions raised match the exceptions expected.
            except InvalidURLError:
                raise socket.gaierror('')
            except (DownloadError, ResponseTooLargeError, SSLCertificateError):
                raise httplib.HTTPException()

        def getresponse(self):
            if self.response:
                return self.response
            else:
                raise httplib.HTTPException()

        def set_debuglevel(self, level):
            pass

        def connect(self):
            pass

        def close(self):
            pass

    class AppEngineHttpsConnection(AppEngineHttpConnection):
        """Same as AppEngineHttpConnection, but for HTTPS URIs."""
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_ssl_certificate_validation=False):
            AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
                strict, timeout, proxy_info, ca_certs, disable_ssl_certificate_validation)
            self.scheme = 'https'

    # Update the connection classes to use the Google App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        'http': AppEngineHttpConnection,
        'https': AppEngineHttpsConnection
    }
except ImportError:
    pass
class Http(object):
    """An HTTP client that handles:

    - all methods
    - caching
    - ETags
    - compression,
    - HTTPS
    - Basic
    - Digest
    - WSSE

    and more.
    """
    def __init__(self, cache=None, timeout=None,
                 proxy_info=ProxyInfo.from_environment,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        """If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.

        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout

        `proxy_info` may be:
          - a callable that takes the http scheme ('http' or 'https') and
            returns a ProxyInfo instance per request. By default, uses
            ProxyInfo.from_environment.
          - a ProxyInfo instance (static proxy config).
          - None (proxy disabled).

        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation. By default, a CA cert file bundled with
        httplib2 is used.

        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation

        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, basestring):
            self.cache = FileCache(cache)
        else:
            self.cache = cache

        # Name/password
        self.credentials = Credentials()

        # Key/cert
        self.certificates = KeyCerts()

        # authorization objects
        self.authorizations = []

        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True

        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]

        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
        self.follow_all_redirects = False

        self.ignore_etag = False

        self.force_exception_to_status_code = False

        self.timeout = timeout

        # Keep Authorization: headers on a redirect.
        self.forward_authorization_headers = False
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication.

        An empty domain (the default) applies the credential to every host.
        """
        self.credentials.add(name, password, domain)
    def add_certificate(self, key, cert, domain):
        """Add a key and cert that will be used for SSL client
        authentication any time a request to *domain* requires it."""
        self.certificates.add(key, cert, domain)
    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication"""
        self.credentials.clear()
        # Also drop any Authorization objects built from those credentials.
        self.authorizations = []
    def _conn_request(self, conn, request_uri, method, body, headers):
        """Send one request on an open connection and read the response.

        Retries up to RETRIES times to paper over transient socket errors
        and servers that closed an idle keep-alive connection. Returns
        (Response, content-string); content is decompressed unless HEAD.
        """
        for i in range(RETRIES):
            try:
                if conn.sock is None:
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                raise
            except socket.gaierror:
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except ssl_SSLError:
                conn.close()
                raise
            except socket.error, e:
                err = 0
                if hasattr(e, 'args'):
                    err = getattr(e, 'args')[0]
                else:
                    err = e.errno
                if err == errno.ECONNREFUSED: # Connection refused
                    raise
                # Other socket errors fall through and are retried below.
            except httplib.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if conn.sock is None:
                    if i < RETRIES-1:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i < RETRIES-1:
                    conn.close()
                    conn.connect()
                    continue
            try:
                response = conn.getresponse()
            except (socket.error, httplib.HTTPException):
                if i < RETRIES-1:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = ""
                if method == "HEAD":
                    # HEAD carries no body; just recycle the connection.
                    conn.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
                break
        return (response, content)
    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary"""

        # Pick the most specific (deepest-scoped) cached authorization, if any.
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)

        (response, content) = self._conn_request(conn, request_uri, method, body, headers)

        if auth:
            if auth.response(response, body):
                # The scheme asked for a retry (e.g. stale digest nonce):
                # re-sign the request and send it once more.
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers )
                response._stale_digest = 1

        if response.status == 401:
            # Challenge received: try each credential/scheme combination
            # until one is accepted, then remember it for future requests.
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
                if response.status != 401:
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break

        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if not response.has_key('location') and response.status != 300:
                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if response.has_key('location'):
                        location = response['location']
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response['location'] = urlparse.urljoin(absolute_uri, location)
                    if response.status == 301 and method in ["GET", "HEAD"]:
                        # Permanent redirects are cached so later requests can
                        # skip straight to the new URL.
                        response['-x-permanent-redirect-url'] = response['location']
                        if not response.has_key('content-location'):
                            response['content-location'] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    if headers.has_key('if-none-match'):
                        del headers['if-none-match']
                    if headers.has_key('if-modified-since'):
                        del headers['if-modified-since']
                    if 'authorization' in headers and not self.forward_authorization_headers:
                        del headers['authorization']
                    if response.has_key('location'):
                        location = response['location']
                        old_response = copy.deepcopy(response)
                        if not old_response.has_key('content-location'):
                            old_response['content-location'] = absolute_uri
                        redirect_method = method
                        if response.status in [302, 303]:
                            # 302/303 responses are retried as GET without a body.
                            redirect_method = "GET"
                            body = None
                        (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                        response.previous = old_response
                else:
                    raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
            elif response.status in [200, 203] and method in ["GET", "HEAD"]:
                # Don't cache 206's since we aren't going to handle byte range requests
                if not response.has_key('content-location'):
                    response['content-location'] = absolute_uri
                _updateCache(headers, response, content, self.cache, cachekey)

        return (response, content)
    def _normalize_headers(self, headers):
        # Thin wrapper over the module-level _normalize_headers helper.
        return _normalize_headers(headers)
    # Need to catch and rebrand some exceptions
    # Then need to optionally turn all exceptions into status codes
    # including all socket.* and httplib.* exceptions.
    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
        """ Performs a single HTTP request.

        The 'uri' is the URI of the HTTP resource and can begin
        with either 'http' or 'https'. The value of 'uri' must be an absolute URI.

        The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
        There is no restriction on the methods allowed.

        The 'body' is the entity body to be sent with the request. It is a string
        object.

        Any extra headers that are to be sent with the request should be provided in the
        'headers' dictionary.

        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.

        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)

            if not headers.has_key('user-agent'):
                headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__

            uri = iri2uri(uri)

            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
            # "http://host:443" is treated as https.
            domain_port = authority.split(":")[0:2]
            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
                scheme = 'https'
                authority = domain_port[0]

            proxy_info = self._get_proxy_info(scheme, authority)

            # Reuse one connection per scheme+authority pair.
            conn_key = scheme+":"+authority
            if conn_key in self.connections:
                conn = self.connections[conn_key]
            else:
                if not connection_type:
                    connection_type = SCHEME_TO_CONNECTION[scheme]
                certs = list(self.certificates.iter(authority))
                if scheme == 'https':
                    if certs:
                        conn = self.connections[conn_key] = connection_type(
                                authority, key_file=certs[0][0],
                                cert_file=certs[0][1], timeout=self.timeout,
                                proxy_info=proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                    else:
                        conn = self.connections[conn_key] = connection_type(
                                authority, timeout=self.timeout,
                                proxy_info=proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                else:
                    conn = self.connections[conn_key] = connection_type(
                            authority, timeout=self.timeout,
                            proxy_info=proxy_info)
                conn.set_debuglevel(debuglevel)

            # Don't ask for compressed content when doing a range request.
            if 'range' not in headers and 'accept-encoding' not in headers:
                headers['accept-encoding'] = 'gzip, deflate'

            info = email.Message.Message()
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    # info = email.message_from_string(cached_value)
                    #
                    # Need to replace the line above with the kludge below
                    # to fix the non-existent bug not fixed in this
                    # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                    try:
                        # Cached entries are "headers\r\n\r\nbody".
                        info, content = cached_value.split('\r\n\r\n', 1)
                        feedparser = email.FeedParser.FeedParser()
                        feedparser.feed(info)
                        info = feedparser.close()
                        feedparser._parse = None
                    except (IndexError, ValueError):
                        # Corrupt cache entry: discard it and carry on uncached.
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None
            else:
                cachekey = None

            if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
                # http://www.w3.org/1999/04/Editing/
                headers['if-match'] = info['etag']

            if method not in ["GET", "HEAD"] and self.cache and cachekey:
                # RFC 2616 Section 13.10
                self.cache.delete(cachekey)

            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in ['GET', 'HEAD'] and 'vary' in info:
                vary = info['vary']
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        cached_value = None
                        break

            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
                if info.has_key('-x-permanent-redirect-url'):
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)

                    if entry_disposition == "FRESH":
                        if not cached_value:
                            info['status'] = '504'
                            content = ""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)

                    if entry_disposition == "STALE":
                        if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                            headers['if-none-match'] = info['etag']
                        if info.has_key('last-modified') and not 'last-modified' in headers:
                            headers['if-modified-since'] = info['last-modified']
                    elif entry_disposition == "TRANSPARENT":
                        pass

                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)

                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.
                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    response.status = 200
                    response.fromcache = True
                elif response.status == 200:
                    content = new_content
                else:
                    self.cache.delete(cachekey)
                    content = new_content
            else:
                cc = _parse_cache_control(headers)
                if cc.has_key('only-if-cached'):
                    # RFC 2616 14.9.4: nothing cached, so answer 504.
                    info['status'] = '504'
                    response = Response(info)
                    content = ""
                else:
                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
        except Exception, e:
            if self.force_exception_to_status_code:
                # Convert failures into synthetic HTTP responses rather
                # than letting the exception propagate.
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = "Request Timeout"
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "408",
                            "content-length": len(content)
                            })
                    response.reason = "Request Timeout"
                else:
                    content = str(e)
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "400",
                            "content-length": len(content)
                            })
                    response.reason = "Bad Request"
            else:
                raise

        return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse.

    Behaves as a dict of lower-cased header names to values, with a few
    response attributes layered on top.
    """

    """Is this response from our local cache"""
    fromcache = False

    """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
    version = 11

    "Status code returned by server. "
    status = 200

    """Reason phrase returned by server."""
    reason = "Ok"

    # The Response of the request that redirected to this one, if any.
    previous = None

    def __init__(self, info):
        # info is either an email.Message or
        # an httplib.HTTPResponse object.
        if isinstance(info, httplib.HTTPResponse):
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key.lower()] = value
            self.status = int(self['status'])
        else:
            # Otherwise assume a plain mapping of header name -> value.
            for key, value in info.iteritems():
                self[key.lower()] = value
            self.status = int(self.get('status', self.status))
            self.reason = self.get('reason', self.reason)

    def __getattr__(self, name):
        # Backwards compatibility: resp.dict returns the response itself.
        if name == 'dict':
            return self
        else:
            raise AttributeError, name
| gpl-2.0 |
pulilab/django-collectform | docs/conf.py | 1 | 8195 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Put the repository root (the parent of this docs directory) on sys.path so
# the ``collectform`` package is importable for autodoc and for the version
# lookup below.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import collectform
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-collectform'
copyright = u'2014, Viktor Nagy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both version and release come from the package itself so the documented
# version can never drift out of sync with the code.
version = collectform.__version__
# The full version, including alpha/beta/rc tags.
release = collectform.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-collectformdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'django-collectform.tex', u'django-collectform Documentation',
   u'Viktor Nagy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-collectform', u'django-collectform Documentation',
     [u'Viktor Nagy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'django-collectform', u'django-collectform Documentation',
   u'Viktor Nagy', 'django-collectform', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
0x90sled/catapult | third_party/beautifulsoup4/scripts/demonstrate_parser_differences.py | 73 | 2976 | """Demonstrate how different parsers parse the same markup.
Beautiful Soup can use any of a number of different parsers. Every
parser should behave more or less the same on valid markup, and
Beautiful Soup's unit tests make sure this is the case. But every
parser handles invalid markup differently. Even different versions of
the same parser handle invalid markup differently. So instead of unit
tests I've created this educational demonstration script.
The file demonstration_markup.txt contains many lines of HTML. This
script tests each line of markup against every parser you have
installed, and prints out how each parser sees that markup. This may
help you choose a parser, or understand why Beautiful Soup presents
your document the way it does.
"""
import os
import sys
from bs4 import BeautifulSoup
# Parser availability is probed at import time: html.parser always ships with
# Python; lxml and html5lib are optional and only added to the candidate list
# when their bs4 builder modules import cleanly.
parsers = ['html.parser']
try:
    from bs4.builder import _lxml
    parsers.append('lxml')
except ImportError, e:
    pass
try:
    from bs4.builder import _html5lib
    parsers.append('html5lib')
except ImportError, e:
    pass
class Demonstration(object):
def __init__(self, markup):
self.results = {}
self.markup = markup
def run_against(self, *parser_names):
uniform_results = True
previous_output = None
for parser in parser_names:
try:
soup = BeautifulSoup(self.markup, parser)
if markup.startswith("<div>"):
# Extract the interesting part
output = soup.div
else:
output = soup
except Exception, e:
output = "[EXCEPTION] %s" % str(e)
self.results[parser] = output
if previous_output is None:
previous_output = output
elif previous_output != output:
uniform_results = False
return uniform_results
def dump(self):
print "%s: %s" % ("Markup".rjust(13), self.markup.encode("utf8"))
for parser, output in self.results.items():
print "%s: %s" % (parser.rjust(13), output.encode("utf8"))
different_results = []
uniform_results = []
print "= Testing the following parsers: %s =" % ", ".join(parsers)
print
# Markup lines come from stdin when piped, otherwise from the bundled
# demonstration_markup.txt (looked up in the cwd, then under scripts/).
input_file = sys.stdin
if sys.stdin.isatty():
    for filename in [
        "demonstration_markup.txt",
        os.path.join("scripts", "demonstration_markup.txt")]:
        if os.path.exists(filename):
            input_file = open(filename)
# Literal "\n" sequences in the data file stand for real newlines.
for markup in input_file:
    demo = Demonstration(markup.decode("utf8").strip().replace("\\n", "\n"))
    is_uniform = demo.run_against(*parsers)
    if is_uniform:
        uniform_results.append(demo)
    else:
        different_results.append(demo)
print "== Markup that's handled the same in every parser =="
print
for demo in uniform_results:
    demo.dump()
    print
print "== Markup that's not handled the same in every parser =="
print
for demo in different_results:
    demo.dump()
    print
| bsd-3-clause |
FallenAngelX/Infinity_MaNGOS | dep/libmpq/bindings/python/mpq.py | 501 | 10430 | """wrapper for libmpq"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import ctypes
import ctypes.util
import os
libmpq = ctypes.CDLL(ctypes.util.find_library("mpq"))
class Error(Exception):
    """Raised for libmpq failures that do not map onto a builtin exception."""

# libmpq return code -> (exception class, *constructor arguments).
errors = {
    -1: (IOError, "open"),
    -2: (IOError, "close"),
    -3: (IOError, "seek"),
    -4: (IOError, "read"),
    -5: (IOError, "write"),
    -6: (MemoryError,),
    -7: (Error, "file is not an mpq or is corrupted"),
    -8: (AssertionError, "not initialized"),
    -9: (AssertionError, "buffer size too small"),
    -10: (IndexError, "file not in archive"),
    -11: (AssertionError, "decrypt"),
    -12: (AssertionError, "unpack"),
}

def check_error(result, func, arguments, errors=errors):
    """ctypes ``errcheck`` hook: pass non-error results through unchanged,
    raise the mapped exception for known libmpq error codes."""
    entry = errors.get(result)
    if entry is None:
        return result
    raise entry[0](*entry[1:])
# Declare return types and install the errcheck hook on every libmpq entry
# point we call, so error codes are converted to exceptions automatically.
libmpq.libmpq__version.restype = ctypes.c_char_p
libmpq.libmpq__archive_open.errcheck = check_error
libmpq.libmpq__archive_close.errcheck = check_error
libmpq.libmpq__archive_packed_size.errcheck = check_error
libmpq.libmpq__archive_unpacked_size.errcheck = check_error
libmpq.libmpq__archive_offset.errcheck = check_error
libmpq.libmpq__archive_version.errcheck = check_error
libmpq.libmpq__archive_files.errcheck = check_error
libmpq.libmpq__file_packed_size.errcheck = check_error
libmpq.libmpq__file_unpacked_size.errcheck = check_error
libmpq.libmpq__file_offset.errcheck = check_error
libmpq.libmpq__file_blocks.errcheck = check_error
libmpq.libmpq__file_encrypted.errcheck = check_error
libmpq.libmpq__file_compressed.errcheck = check_error
libmpq.libmpq__file_imploded.errcheck = check_error
libmpq.libmpq__file_number.errcheck = check_error
libmpq.libmpq__file_read.errcheck = check_error
libmpq.libmpq__block_open_offset.errcheck = check_error
libmpq.libmpq__block_close_offset.errcheck = check_error
libmpq.libmpq__block_unpacked_size.errcheck = check_error
libmpq.libmpq__block_read.errcheck = check_error
__version__ = libmpq.libmpq__version()
class Reader(object):
    """File-like sequential reader over one file inside an MPQ archive.

    Decompresses the file block by block on demand and buffers the
    leftovers, so small reads do not re-decode whole blocks.  Modules
    bound as default arguments keep the methods working even after the
    module-level ``del`` cleanup below.
    """
    def __init__(self, file, libmpq=libmpq):
        self._file = file
        self._pos = 0            # logical read position in unpacked bytes
        self._buf = []           # decoded-but-unconsumed chunks
        self._cur_block = 0      # next block index to decode
        libmpq.libmpq__block_open_offset(self._file._archive._mpq,
                self._file.number)
    def __iter__(self):
        return self
    def __repr__(self):
        return "iter(%r)" % self._file
    def seek(self, offset, whence=os.SEEK_SET, os=os):
        """Seek like file.seek(); rewinding restarts decoding from block 0."""
        if whence == os.SEEK_SET:
            pass
        elif whence == os.SEEK_CUR:
            offset += self._pos
        elif whence == os.SEEK_END:
            offset += self._file.unpacked_size
        else:
            raise ValueError, "invalid whence"
        if offset >= self._pos:
            # Forward seeks are implemented by reading and discarding.
            self.read(offset - self._pos)
        else:
            self._pos = 0
            self._buf = []
            self._cur_block = 0
            self.read(offset)
    def tell(self):
        return self._pos
    def _read_block(self, ctypes=ctypes, libmpq=libmpq):
        # Decode the next block into the buffer list.
        block_size = ctypes.c_uint64()
        libmpq.libmpq__block_unpacked_size(self._file._archive._mpq,
                self._file.number, self._cur_block, ctypes.byref(block_size))
        block_data = ctypes.create_string_buffer(block_size.value)
        libmpq.libmpq__block_read(self._file._archive._mpq,
                self._file.number, self._cur_block,
                block_data, ctypes.c_uint64(len(block_data)), None)
        self._buf.append(block_data.raw)
        self._cur_block += 1
    def read(self, size=-1):
        """Read up to ``size`` bytes (all remaining bytes when negative)."""
        while size < 0 or sum(map(len, self._buf)) < size:
            if self._cur_block == self._file.blocks:
                break
            self._read_block()
        buf = "".join(self._buf)
        if size < 0:
            ret = buf
            self._buf = []
        else:
            ret = buf[:size]
            self._buf = [buf[size:]]
        self._pos += len(ret)
        return ret
    def readline(self, os=os):
        """Read one line; the terminator ('\\r', '\\n' or both) is included."""
        line = []
        while True:
            char = self.read(1)
            if char == "":
                break
            if char not in '\r\n' and line and line[-1] in '\r\n':
                # First character past the line ending: push it back and stop.
                self.seek(-1, os.SEEK_CUR)
                break
            line.append(char)
        return ''.join(line)
    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line
    def readlines(self, sizehint=-1):
        res = []
        while sizehint < 0 or sum(map(len, res)) < sizehint:
            line = self.readline()
            if not line:
                break
            res.append(line)
        return res
    xreadlines = __iter__
    def __del__(self, libmpq=libmpq):
        libmpq.libmpq__block_close_offset(self._file._archive._mpq,
                self._file.number)
class File(object):
    """One file inside an :class:`Archive`, addressed by its index number.

    Metadata (sizes, offset, block count, flags) is fetched eagerly from
    libmpq and exposed as plain attributes; ``str(file)`` yields the whole
    unpacked content and ``iter(file)`` returns a streaming :class:`Reader`.
    """
    def __init__(self, archive, number, ctypes=ctypes, libmpq=libmpq):
        self._archive = archive
        self.number = number
        # Pull each metadata field through its libmpq accessor.
        for name, atype in [
                    ("packed_size", ctypes.c_uint64),
                    ("unpacked_size", ctypes.c_uint64),
                    ("offset", ctypes.c_uint64),
                    ("blocks", ctypes.c_uint32),
                    ("encrypted", ctypes.c_uint32),
                    ("compressed", ctypes.c_uint32),
                    ("imploded", ctypes.c_uint32),
                ]:
            data = atype()
            func = getattr(libmpq, "libmpq__file_"+name)
            func(self._archive._mpq, self.number, ctypes.byref(data))
            setattr(self, name, data.value)
    def __str__(self, ctypes=ctypes, libmpq=libmpq):
        # Unpack the entire file in one libmpq call.
        data = ctypes.create_string_buffer(self.unpacked_size)
        libmpq.libmpq__file_read(self._archive._mpq, self.number,
                data, ctypes.c_uint64(len(data)), None)
        return data.raw
    def __repr__(self):
        return "%r[%i]" % (self._archive, self.number)
    def __iter__(self, Reader=Reader):
        return Reader(self)
class Archive(object):
    """An MPQ archive opened from a filename or from an uncompressed
    :class:`File` nested inside another archive.

    Supports ``len()``, ``in`` (by name or index) and indexing (by name or
    index), each returning/accepting :class:`File` objects.
    """
    def __init__(self, source, ctypes=ctypes, File=File, libmpq=libmpq):
        self._source = source
        if isinstance(source, File):
            # Nested archive: only plain stored files can be reopened.
            assert not source.encrypted
            assert not source.compressed
            assert not source.imploded
            self.filename = source._archive.filename
            offset = source._archive.offset + source.offset
        else:
            self.filename = source
            offset = -1
        self._mpq = ctypes.c_void_p()
        libmpq.libmpq__archive_open(ctypes.byref(self._mpq), self.filename,
                ctypes.c_uint64(offset))
        # Only set once open succeeded, so __del__ never closes a bad handle.
        self._opened = True
        for field_name, field_type in [
                    ("packed_size", ctypes.c_uint64),
                    ("unpacked_size", ctypes.c_uint64),
                    ("offset", ctypes.c_uint64),
                    ("version", ctypes.c_uint32),
                    ("files", ctypes.c_uint32),
                ]:
            func = getattr(libmpq, "libmpq__archive_" + field_name)
            data = field_type()
            func(self._mpq, ctypes.byref(data))
            setattr(self, field_name, data.value)
    def __del__(self, libmpq=libmpq):
        if getattr(self, "_opened", False):
            libmpq.libmpq__archive_close(self._mpq)
    def __len__(self):
        return self.files
    def __contains__(self, item, ctypes=ctypes, libmpq=libmpq):
        if isinstance(item, str):
            data = ctypes.c_uint32()
            try:
                # errcheck maps "not found" (-10) to IndexError.
                libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item),
                        ctypes.byref(data))
            except IndexError:
                return False
            return True
        return 0 <= item < self.files
    def __getitem__(self, item, ctypes=ctypes, File=File, libmpq=libmpq):
        if isinstance(item, str):
            # Resolve a filename to its index first.
            data = ctypes.c_int()
            libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item),
                    ctypes.byref(data))
            item = data.value
        else:
            if not 0 <= item < self.files:
                raise IndexError, "file not in archive"
        return File(self, item)
    def __repr__(self):
        return "mpq.Archive(%r)" % self._source
# Remove clutter - everything except Error and Archive.
del os, check_error, ctypes, errors, File, libmpq, Reader
# Self-test: open the archive named on the command line, dump its metadata,
# and verify that the four different read strategies all agree byte-for-byte.
if __name__ == "__main__":
    import sys, random
    archive = Archive(sys.argv[1])
    print repr(archive)
    for k, v in archive.__dict__.iteritems():
        #if k[0] == '_': continue
        print " " * (4 - 1), k, v
    assert '(listfile)' in archive
    assert 0 in archive
    assert len(archive) == archive.files
    files = [x.strip() for x in archive['(listfile)']]
    files.extend(xrange(archive.files))
    for key in files: #sys.argv[2:] if sys.argv[2:] else xrange(archive.files):
        file = archive[key]
        print
        print " " * (4 - 1), repr(file)
        for k, v in file.__dict__.iteritems():
            #if k[0] == '_': continue
            print " " * (8 - 1), k, v
        # a: whole-file read; b: single Reader.read(); c: random-sized chunked
        # reads; d: line-by-line iteration.  All four must match.
        a = str(file)
        b = iter(file).read()
        reader = iter(file)
        c = []
        while True:
            l = random.randrange(1, 10)
            d = reader.read(l)
            if not d: break
            assert len(d) <= l
            c.append(d)
        c = "".join(c)
        d = []
        reader.seek(0)
        for line in reader:
            d.append(line)
        d = "".join(d)
        assert a == b == c == d, map(hash, [a,b,c,d])
        assert len(a) == file.unpacked_size
        repr(iter(file))
        reader.seek(0)
        a = reader.readlines()
        reader.seek(0)
        b = list(reader)
        assert a == b
| gpl-2.0 |
rlaverde/scorecard_cps | performance_indicators_project/performance_indicators_project/urls.py | 1 | 2366 | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing for the performance-indicators project.  Static pages are served
# directly via TemplateView; each app's routes live in its own namespaced
# urls module (several apps expose multiple url modules, one per model).
urlpatterns = patterns('',
    url(r'^$', TemplateView.as_view(template_name='index.html'), name='home'),
    url(r'^config$', TemplateView.as_view(template_name='edit_models.html'), name='editmodels'),
    url(r'^map/$', TemplateView.as_view(template_name='strategic_map.html'), name='map'),
    url(r'^accounts/', include('accounts.urls', namespace='accounts')),
    url(r'^perspectives/', include('perspectives.urls', namespace='perspectives')),
    url(r'^targets/', include('perspectives.urls_target', namespace='targets')),
    url(r'^initiatives/', include('perspectives.urls_initiative', namespace='initiatives')),
    url(r'^resources/', include('perspectives.urls_resource', namespace='resources')),
    url(r'^incharge/', include('perspectives.urls_incharge', namespace='incharge')),
    url(r'^committee/', include('perspectives.urls_committee', namespace='committees')),
    url(r'^indicators/', include('indicators.urls', namespace='indicators')),
    url(r'^main_indicators/', include('indicators.urls_main_indicator', namespace='main_indicators')),
    url(r'^parameters/', include('indicators.urls_parameter', namespace='parameters')),
    url(r'^periods/', include('periods.urls', namespace='periods')),
    url(r'^reports/', include('periods.urls_report', namespace='reports')),
    # Examples:
    # url(r'^$', 'performance_indicators_project.views.home', name='home'),
    # url(r'^performance_indicators_project/', include('performance_indicators_project.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Django Debug Toolbar routes are only mounted in DEBUG mode.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
| gpl-3.0 |
AlMikFox3/Ciphers | VignereCipher/vignere_cipher.py | 1 | 1867 | import time
import random
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def encrypt():
    """Encrypt a text file with a freshly generated random Vigenère key.

    Prompts for the input path, generates a random key of 10-17 uppercase
    letters, and writes two timestamped files into the working directory:
    'key<ts>' (the key) and 'enc<ts>' (the ciphertext).  Letters are
    shifted and uppercased in the output; all other characters are copied
    through unchanged.
    """
    fname = input('Enter the name/path of the file to be encrypted : ')
    with open(fname, 'r') as src:  # 'with' guarantees the handle is closed
        msg = src.read()
    # Random key: 10-17 uniformly chosen uppercase letters.
    key = ''
    kl = random.randint(10, 17)
    for _ in range(kl):
        key += LETTERS[random.randint(0, 25)]
    # One timestamp shared by both output files so the key/ciphertext pair has
    # matching names (previously it was recomputed and could straddle a second
    # boundary, producing mismatched filenames).
    keyf = str(time.ctime()).replace(' ', '_').replace(':', '_')
    # BUG FIX: the key file handle was previously never closed.
    with open('key' + keyf, 'w') as key_file:
        key_file.write(key)
    print('key_' + keyf + " generated....")
    enc_chars = []
    keyindex = 0
    for symbol in msg:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            num = (num + LETTERS.find(key[keyindex])) % len(LETTERS)
            enc_chars.append(LETTERS[num])
            keyindex = (keyindex + 1) % len(key)  # wrap around the key
        else:
            enc_chars.append(symbol)  # non-letters pass through unencrypted
    with open('enc' + keyf, 'w') as enc_file:
        enc_file.write(''.join(enc_chars))
    print('ENCRYPTION SUCCESSFUL ! enc_' + keyf + " generated....")
def decrypt():
    """Decrypt a Vigenère-encrypted text file with a user-supplied key.

    Prompts for the ciphertext path and the key, then writes the recovered
    plaintext to a timestamped 'dec<ts>' file in the working directory.
    Letters are shifted back and uppercased; all other characters are
    copied through unchanged.
    """
    fname = input('Enter the name/path of the file to be decrypted : ')
    with open(fname, 'r') as src:  # 'with' guarantees the handle is closed
        msg = src.read()
    key = input('Enter Security Key (character string) for decryption :')
    dec_chars = []
    keyindex = 0
    for symbol in msg:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            num = (num - LETTERS.find(key[keyindex])) % len(LETTERS)
            dec_chars.append(LETTERS[num])
            keyindex = (keyindex + 1) % len(key)  # wrap around the key
        else:
            dec_chars.append(symbol)  # non-letters pass through unchanged
    keyf = str(time.ctime()).replace(' ', '_').replace(':', '_')
    with open('dec' + keyf, 'w') as out:
        out.write(''.join(dec_chars))
    print('DECRYPTION SUCCESSFUL ! dec_' + keyf + " generated....")
# Simple CLI dispatcher: 'E'/'e' encrypts, 'D'/'d' decrypts, anything else
# exits silently.
mode = input ('E/D ? - ')
if(mode == 'E' or mode == 'e'):
    encrypt()
elif (mode == 'D' or mode == 'd'):
decrypt() | mit |
lamdnhan/osf.io | website/addons/github/views/config.py | 37 | 2946 | # -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_permission
from website.project.decorators import must_not_be_registration
from website.project.decorators import must_have_addon
from ..api import GitHub
@must_be_logged_in
def github_set_user_config(**kwargs):
    # Placeholder endpoint: user-level GitHub settings need no server-side
    # changes here, so an empty JSON object is returned.
    return {}
@must_have_permission('write')
@must_have_addon('github', 'node')
@must_not_be_registration
def github_set_config(**kwargs):
    """Link a GitHub repository to a node.

    Validates that the caller may change the settings and that the
    requested user/repo is accessible, then updates the node settings,
    re-registers the webhook, and logs the change.  Returns an empty JSON
    object on success, or a message with HTTP 400 on failure.
    """
    auth = kwargs['auth']
    user = auth.user
    node_settings = kwargs['node_addon']
    node = node_settings.owner
    user_settings = node_settings.user_settings
    # If authorized, only owner can change settings
    if user_settings and user_settings.owner != user:
        raise HTTPError(http.BAD_REQUEST)
    # Parse request
    github_user_name = request.json.get('github_user', '')
    github_repo_name = request.json.get('github_repo', '')
    # Verify that repo exists and that user can access
    connection = GitHub.from_settings(user_settings)
    repo = connection.repo(github_user_name, github_repo_name)
    if repo is None:
        if user_settings:
            message = (
                'Cannot access repo. Either the repo does not exist '
                'or your account does not have permission to view it.'
            )
        else:
            message = (
                'Cannot access repo.'
            )
        return {'message': message}, http.BAD_REQUEST
    # NOTE(review): this empty-name check runs after the repo lookup above —
    # presumably connection.repo already fails for empty names; confirm.
    if not github_user_name or not github_repo_name:
        raise HTTPError(http.BAD_REQUEST)
    changed = (
        github_user_name != node_settings.user or
        github_repo_name != node_settings.repo
    )
    # Update hooks
    if changed:
        # Delete existing hook, if any
        node_settings.delete_hook()
        # Update node settings
        node_settings.user = github_user_name
        node_settings.repo = github_repo_name
        # Log repo select
        node.add_log(
            action='github_repo_linked',
            params={
                'project': node.parent_id,
                'node': node._id,
                'github': {
                    'user': github_user_name,
                    'repo': github_repo_name,
                }
            },
            auth=auth,
        )
        # Add new hook
        if node_settings.user and node_settings.repo:
            node_settings.add_hook(save=False)
        node_settings.save()
    return {}
@must_have_permission('write')
@must_have_addon('github', 'node')
def github_set_privacy(**kwargs):
    """Forward the requested privacy flag for the linked repo to GitHub.

    Expects a 'private' form field; responds with HTTP 400 when it is
    missing.
    """
    node_settings = kwargs['node_addon']
    private_flag = request.form.get('private')
    if private_flag is None:
        raise HTTPError(http.BAD_REQUEST)
    connection = GitHub.from_settings(node_settings.user_settings)
    connection.set_privacy(
        node_settings.user, node_settings.repo, private_flag)
| apache-2.0 |
vk-brain/sketal | plugins/outsource/outsource_emotions_detector.py | 2 | 4569 | from handler.base_plugin import CommandPlugin
import aiohttp, json, time
class EmotionsDetectorPlugin(CommandPlugin):
    """Bot command plugin: sends an attached photo to the Microsoft
    Cognitive Services Face API and replies (in Russian) with the detected
    age, gender and per-emotion confidence scores."""
    __slots__ = ("key", "dirt", "clean_time", "requests_amount", "time_delta")
    def __init__(self, *commands, prefixes=None, strict=False, key=None, time_delta=60, requests_amount=15):
        """Answers with results of detecting emotions on sent image."""
        # `key` is a mandatory Azure Face API subscription key.
        if not key:
            raise AttributeError("You didn't specified key! You can get it here: https://azure.microsoft.com/ru-ru/services/cognitive-services/face/")
        if not commands:
            commands = ("эмоции",)
        super().__init__(*commands, prefixes=prefixes, strict=strict)
        self.key = key
        # Simple rate limiter: at most `requests_amount` API calls ("dirt")
        # per `time_delta` seconds; the counter resets once `clean_time` passes.
        self.dirt = 0
        self.time_delta = time_delta
        self.clean_time = time.time() + time_delta
        self.requests_amount = requests_amount
        self.description = [f"Детектор эмоций",
                            f"{self.command_example()} - распознать эмоции на лице'."]
    async def process_message(self, msg):
        # Enforce the rate limit before doing any work.
        if self.dirt >= self.requests_amount:
            if time.time() >= self.clean_time:
                self.dirt = 0
                self.clean_time = time.time() + self.time_delta
            else:
                return await msg.answer('Лимит запросов исчерпан! Попробуйте через минуту или две.')
        # Require a photo attachment in the message.
        photo = False
        for k, v in msg.brief_attaches.items():
            if '_type' in k and v == "photo":
                photo = True
                break
        if not photo:
            return await msg.answer('Вы не прислали фото!')
        attach = (await msg.get_full_attaches())[0]
        if not attach.url:
            return await msg.answer('Вы не прислали фото!')
        uri_base = 'https://westcentralus.api.cognitive.microsoft.com'
        # Request headers.
        headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': self.key}
        params = {'returnFaceId': 'true', 'returnFaceLandmarks': 'false', 'returnFaceAttributes': 'age,gender,emotion'}
        body = {'url': attach.url}
        try: # Execute the REST API call and get the response.
            self.dirt += 1
            async with aiohttp.ClientSession() as sess:
                async with sess.post(uri_base + '/face/v1.0/detect', data=None, json=body, headers=headers, params=params) as resp:
                    response = await resp.text()
            # The API returns a JSON list with one entry per detected face.
            parsed = json.loads(response)
            answer = ""
            for i, e in enumerate(parsed):
                age = e["faceAttributes"]["age"]
                sex = "женский" if e["faceAttributes"]['gender'] == "female" else "мужской"
                fear = e["faceAttributes"]["emotion"]["fear"]
                anger = e["faceAttributes"]["emotion"]["anger"]
                contempt = e["faceAttributes"]["emotion"]["contempt"]
                disgust = e["faceAttributes"]["emotion"]["disgust"]
                happiness = e["faceAttributes"]["emotion"]["happiness"]
                neutral = e["faceAttributes"]["emotion"]["neutral"]
                sadness = e["faceAttributes"]["emotion"]["sadness"]
                surprise = e["faceAttributes"]["emotion"]["surprise"]
                answer += f"Анализ фотографии (лицо #{i + 1})\n💁♂️Возраст: {age}\n👫Пол: {sex}\n😵Страх: {fear}\n😤Злость: {anger}\n" \
                          f"😐Презрение: {contempt}\n🤢Отвращение: {disgust}\n🙂Счастье: {happiness}\n" \
                          f"😶Нейтральность: {neutral}\n😔Грусть: {sadness}\n😯Удивление: {surprise}\n\n"
            # No faces found (or an API error payload): fall through to the
            # "no face detected" reply at the bottom via ValueError.
            if not answer:
                raise ValueError("No answer")
            return await msg.answer(answer)
        except TypeError:
            return await msg.answer(chat_id=msg.chat_id, message="Ошибочка! Наверное, мой ключ доступа перестал работать.")
        except ValueError:
            pass
        except Exception as e:
            import traceback
            traceback.print_exc()
        await msg.answer(chat_id=msg.chat_id, message="Не удалось обнаружить лицо на фотографии")
| mit |
joerg84/arangodb | 3rdParty/V8/v5.7.0.0/tools/gyp/test/mac/gyptest-app.py | 34 | 4074 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import TestMac
import os
import plistlib
import subprocess
import sys
# Skip the test on the platforms where it would otherwise run; it is
# temporarily disabled (see the linked Chromium bug).
if sys.platform in ('darwin', 'win32'):
  print "This test is currently disabled: https://crbug.com/483696."
  sys.exit(0)
def CheckFileXMLPropertyList(file):
output = subprocess.check_output(['file', file])
# The double space after XML is intentional.
if not 'XML document text' in output:
print 'File: Expected XML document text, got %s' % output
test.fail_test()
def ExpectEq(expected, actual):
  # Assertion helper: report a mismatch to stderr and mark the test as
  # failed, but keep running so later checks still execute.
  if expected != actual:
    print >>sys.stderr, 'Expected "%s", got "%s"' % (expected, actual)
    test.fail_test()
def ls(path):
  '''Returns a list of all files in a directory, relative to the directory.'''
  prefix_len = len(path) + 1  # strip "<path>/" from every walked filename
  found = []
  for dirpath, _, filenames in os.walk(path):
    found.extend(os.path.join(dirpath, name)[prefix_len:] for name in filenames)
  return found
# The bundle checks only make sense on macOS.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  test.run_gyp('test.gyp', chdir='app-bundle')
  test.build('test.gyp', test.ALL, chdir='app-bundle')
  # Binary
  test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
                             chdir='app-bundle')
  # Info.plist
  info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
                                    chdir='app-bundle')
  test.must_exist(info_plist)
  test.must_contain(info_plist, 'com.google.Test-App-Gyp') # Variable expansion
  test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}');
  CheckFileXMLPropertyList(info_plist)
  if test.format != 'make':
    # TODO: Synthesized plist entries aren't hooked up in the make generator.
    machine = subprocess.check_output(['sw_vers', '-buildVersion']).rstrip('\n')
    plist = plistlib.readPlist(info_plist)
    ExpectEq(machine, plist['BuildMachineOSBuild'])
    # Prior to Xcode 5.0.0, SDKROOT (and thus DTSDKName) was only defined if
    # set in the Xcode project file. Starting with that version, it is always
    # defined.
    expected = ''
    if TestMac.Xcode.Version() >= '0500':
      version = TestMac.Xcode.SDKVersion()
      expected = 'macosx' + version
    ExpectEq(expected, plist['DTSDKName'])
    sdkbuild = TestMac.Xcode.SDKBuild()
    if not sdkbuild:
      # Above command doesn't work in Xcode 4.2.
      sdkbuild = plist['BuildMachineOSBuild']
    ExpectEq(sdkbuild, plist['DTSDKBuild'])
    ExpectEq(TestMac.Xcode.Version(), plist['DTXcode'])
    ExpectEq(TestMac.Xcode.Build(), plist['DTXcodeBuild'])
  # Resources
  strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
  for f in strings_files:
    strings = test.built_file_path(
        os.path.join('Test App Gyp.app/Contents/Resources/English.lproj', f),
        chdir='app-bundle')
    test.must_exist(strings)
    # Xcodes writes UTF-16LE with BOM.
    contents = open(strings, 'rb').read()
    if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
      test.fail_test()
  test.built_file_must_exist(
      'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
      chdir='app-bundle')
  # Packaging
  test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
                             chdir='app-bundle')
  test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
                             chdir='app-bundle')
  # Check that no other files get added to the bundle.
  if set(ls(test.built_file_path('Test App Gyp.app', chdir='app-bundle'))) != \
     set(['Contents/MacOS/Test App Gyp',
          'Contents/Info.plist',
          'Contents/Resources/English.lproj/MainMenu.nib',
          'Contents/PkgInfo',
          ] +
         [os.path.join('Contents/Resources/English.lproj', f)
          for f in strings_files]):
    test.fail_test()
  test.pass_test()
| apache-2.0 |
georgestarcher/TA-SyncKVStore | bin/ta_synckvstore/solnlib/packages/splunklib/searchcommands/search_command.py | 4 | 38354 | # coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
# Absolute imports
from ..client import Service
from collections import namedtuple
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ..ordereddict import OrderedDict
from copy import deepcopy
from cStringIO import StringIO
from itertools import chain, ifilter, imap, islice, izip
from logging import _levelNames, getLevelName, getLogger
try:
from shutil import make_archive
except ImportError:
# Used for recording, skip on python 2.6
pass
from time import time
from urllib import unquote
from urlparse import urlsplit
from warnings import warn
from xml.etree import ElementTree
import os
import sys
import re
import csv
import tempfile
import traceback
# Relative imports
from . internals import (
CommandLineParser,
CsvDialect,
InputHeader,
Message,
MetadataDecoder,
MetadataEncoder,
ObjectView,
Recorder,
RecordWriterV1,
RecordWriterV2,
json_encode_string)
from . import Boolean, Option, environment
# ----------------------------------------------------------------------------------------------------------------------
# P1 [ ] TODO: Log these issues against ChunkedExternProcessor
#
# 1. Implement requires_preop configuration setting.
# This configuration setting is currently rejected by ChunkedExternProcessor.
#
# 2. Rename type=events as type=eventing for symmetry with type=reporting and type=streaming
# Eventing commands process records on the events pipeline. This change effects ChunkedExternProcessor.cpp,
# eventing_command.py, and generating_command.py.
#
# 3. For consistency with SCPV1, commands.conf should not require filename setting when chunked = true
# The SCPV1 processor uses <stanza-name>.py as the default filename. The ChunkedExternProcessor should do the same.
# P1 [ ] TODO: Verify that ChunkedExternProcessor complains if a streaming_preop has a type other than 'streaming'
# It once looked like sending type='reporting' for the streaming_preop was accepted.
# ----------------------------------------------------------------------------------------------------------------------
# P2 [ ] TODO: Consider bumping None formatting up to Option.Item.__str__
class SearchCommand(object):
""" Represents a custom search command.
"""
def __init__(self):
# Variables that may be used, but not altered by derived classes
class_name = self.__class__.__name__
self._logger, self._logging_configuration = getLogger(class_name), environment.logging_configuration
# Variables backing option/property values
self._configuration = self.ConfigurationSettings(self)
self._input_header = InputHeader()
self._fieldnames = None
self._finished = None
self._metadata = None
self._options = None
self._protocol_version = None
self._search_results_info = None
self._service = None
# Internal variables
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
return text
# region Options
@Option
def logging_configuration(self):
""" **Syntax:** logging_configuration=<path>
**Description:** Loads an alternative logging configuration file for
a command invocation. The logging configuration file must be in Python
ConfigParser-format. Path names are relative to the app root directory.
"""
return self._logging_configuration
@logging_configuration.setter
def logging_configuration(self, value):
self._logger, self._logging_configuration = environment.configure_logging(self.__class__.__name__, value)
@Option
def logging_level(self):
""" **Syntax:** logging_level=[CRITICAL|ERROR|WARNING|INFO|DEBUG|NOTSET]
**Description:** Sets the threshold for the logger of this command invocation. Logging messages less severe than
`logging_level` will be ignored.
"""
return getLevelName(self._logger.getEffectiveLevel())
@logging_level.setter
def logging_level(self, value):
if value is None:
value = self._default_logging_level
if isinstance(value, (bytes, unicode)):
try:
level = _levelNames[value.upper()]
except KeyError:
raise ValueError('Unrecognized logging level: {}'.format(value))
else:
try:
level = int(value)
except ValueError:
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
record = Option(doc='''
**Syntax: record=<bool>
**Description:** When `true`, records the interaction between the command and splunkd. Defaults to `false`.
''', default=False, validate=Boolean())
show_configuration = Option(doc='''
**Syntax:** show_configuration=<bool>
**Description:** When `true`, reports command configuration as an informational message. Defaults to `false`.
''', default=False, validate=Boolean())
# endregion
# region Properties
@property
def configuration(self):
""" Returns the configuration settings for this command.
"""
return self._configuration
@property
def fieldnames(self):
""" Returns the fieldnames specified as argument to this command.
"""
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
@property
def input_header(self):
""" Returns the input header for this command.
:return: The input header for this command.
:rtype: InputHeader
"""
warn(
'SearchCommand.input_header is deprecated and will be removed in a future release. '
'Please use SearchCommand.metadata instead.', DeprecationWarning, 2)
return self._input_header
@property
def logger(self):
""" Returns the logger for this command.
:return: The logger for this command.
:rtype:
"""
return self._logger
@property
def metadata(self):
return self._metadata
@property
def options(self):
""" Returns the options specified as argument to this command.
"""
if self._options is None:
self._options = Option.View(self)
return self._options
@property
def protocol_version(self):
return self._protocol_version
@property
def search_results_info(self):
""" Returns the search results info for this command invocation.
The search results info object is created from the search results info file associated with the command
invocation.
:return: Search results info:const:`None`, if the search results info file associated with the command
invocation is inaccessible.
:rtype: SearchResultsInfo or NoneType
"""
if self._search_results_info is not None:
return self._search_results_info
if self._protocol_version == 1:
try:
path = self._input_header['infoPath']
except KeyError:
return None
else:
assert self._protocol_version == 2
try:
dispatch_dir = self._metadata.searchinfo.dispatch_dir
except AttributeError:
return None
path = os.path.join(dispatch_dir, 'info.csv')
try:
with open(path, 'rb') as f:
reader = csv.reader(f, dialect=CsvDialect)
fields = reader.next()
values = reader.next()
except IOError as error:
if error.errno == 2:
self.logger.error('Search results info file {} does not exist.'.format(json_encode_string(path)))
return
raise
def convert_field(field):
return (field[1:] if field[0] == '_' else field).replace('.', '_')
decode = MetadataDecoder().decode
def convert_value(value):
try:
return decode(value) if len(value) > 0 else value
except ValueError:
return value
info = ObjectView(dict(imap(lambda (f, v): (convert_field(f), convert_value(v)), izip(fields, values))))
try:
count_map = info.countMap
except AttributeError:
pass
else:
count_map = count_map.split(';')
n = len(count_map)
info.countMap = dict(izip(islice(count_map, 0, n, 2), islice(count_map, 1, n, 2)))
try:
msg_type = info.msgType
msg_text = info.msg
except AttributeError:
pass
else:
messages = ifilter(lambda (t, m): t or m, izip(msg_type.split('\n'), msg_text.split('\n')))
info.msg = [Message(message) for message in messages]
del info.msgType
try:
info.vix_families = ElementTree.fromstring(info.vix_families)
except AttributeError:
pass
self._search_results_info = info
return info
@property
def service(self):
""" Returns a Splunk service object for this command invocation or None.
The service object is created from the Splunkd URI and authentication token passed to the command invocation in
the search results info file. This data is not passed to a command invocation by default. You must request it by
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
enableheader = true
requires_srinfo = true
The :code:`enableheader` setting is :code:`true` by default. Hence, you need not set it. The
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
:code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
of :code:`None` is returned.
"""
if self._service is not None:
return self._service
metadata = self._metadata
if metadata is None:
return None
try:
searchinfo = self._metadata.searchinfo
except AttributeError:
return None
splunkd_uri = searchinfo.splunkd_uri
if splunkd_uri is None:
return None
uri = urlsplit(splunkd_uri, allow_fragments=False)
self._service = Service(
scheme=uri.scheme, host=uri.hostname, port=uri.port, app=searchinfo.app, token=searchinfo.session_key)
return self._service
# endregion
# region Methods
    def error_exit(self, error, message=None):
        """ Logs an abnormal exit and reports an error message to the search user before exiting.

        :param error: The exception or error object being reported; its ``message``
            attribute is shown to the search user when *message* is :const:`None`.
        :param message: Optional text to show the search user instead of ``error.message``.
        :return: Does not return; the process exits with status 1.
        """
        self.write_error(error.message if message is None else message)
        self.logger.error('Abnormal exit: %s', error)
        exit(1)
def finish(self):
""" Flushes the output buffer and signals that this command has finished processing data.
:return: :const:`None`
"""
self._record_writer.flush(finished=True)
def flush(self):
""" Flushes the output buffer.
:return: :const:`None`
"""
self._record_writer.flush(partial=True)
def prepare(self):
""" Prepare for execution.
This method should be overridden in search command classes that wish to examine and update their configuration
or option settings prior to execution. It is called during the getinfo exchange before command metadata is sent
to splunkd.
:return: :const:`None`
:rtype: NoneType
"""
pass
def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
""" Process data.
:param argv: Command line arguments.
:type argv: list or tuple
:param ifile: Input data file.
:type ifile: file
:param ofile: Output data file.
:type ofile: file
:return: :const:`None`
:rtype: NoneType
"""
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
self._process_protocol_v2(argv, ifile, ofile)
def _map_input_header(self):
metadata = self._metadata
searchinfo = metadata.searchinfo
self._input_header.update(
allowStream=None,
infoPath=os.path.join(searchinfo.dispatch_dir, 'info.csv'),
keywords=None,
preview=metadata.preview,
realtime=searchinfo.earliest_time != 0 and searchinfo.latest_time != 0,
search=searchinfo.search,
sid=searchinfo.sid,
splunkVersion=searchinfo.splunk_version,
truncated=None)
def _map_metadata(self, argv):
source = SearchCommand._MetadataSource(argv, self._input_header, self.search_results_info)
def _map(metadata_map):
metadata = {}
for name, value in metadata_map.iteritems():
if isinstance(value, dict):
value = _map(value)
else:
transform, extract = value
if extract is None:
value = None
else:
value = extract(source)
if not (value is None or transform is None):
value = transform(value)
metadata[name] = value
return ObjectView(metadata)
self._metadata = _map(SearchCommand._metadata_map)
_metadata_map = {
'action':
(lambda v: 'getinfo' if v == '__GETINFO__' else 'execute' if v == '__EXECUTE__' else None, lambda s: s.argv[1]),
'preview':
(bool, lambda s: s.input_header.get('preview')),
'searchinfo': {
'app':
(lambda v: v.ppc_app, lambda s: s.search_results_info),
'args':
(None, lambda s: s.argv),
'dispatch_dir':
(os.path.dirname, lambda s: s.input_header.get('infoPath')),
'earliest_time':
(lambda v: float(v.rt_earliest) if len(v.rt_earliest) > 0 else 0.0, lambda s: s.search_results_info),
'latest_time':
(lambda v: float(v.rt_latest) if len(v.rt_latest) > 0 else 0.0, lambda s: s.search_results_info),
'owner':
(None, None),
'raw_args':
(None, lambda s: s.argv),
'search':
(unquote, lambda s: s.input_header.get('search')),
'session_key':
(lambda v: v.auth_token, lambda s: s.search_results_info),
'sid':
(None, lambda s: s.input_header.get('sid')),
'splunk_version':
(None, lambda s: s.input_header.get('splunkVersion')),
'splunkd_uri':
(lambda v: v.splunkd_uri, lambda s: s.search_results_info),
'username':
(lambda v: v.ppc_user, lambda s: s.search_results_info)}}
_MetadataSource = namedtuple(b'Source', (b'argv', b'input_header', b'search_results_info'))
def _prepare_protocol_v1(self, argv, ifile, ofile):
debug = environment.splunklib_logger.debug
# Provide as much context as possible in advance of parsing the command line and preparing for execution
self._input_header.read(ifile)
self._protocol_version = 1
self._map_metadata(argv)
debug(' metadata=%r, input_header=%r', self._metadata, self._input_header)
try:
tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
except AttributeError:
raise RuntimeError('{}.metadata.searchinfo.dispatch_dir is undefined'.format(self.__class__.__name__))
debug(' tempfile.tempdir=%r', tempfile.tempdir)
CommandLineParser.parse(self, argv[2:])
self.prepare()
if self.record:
self.record = False
record_argv = [argv[0], argv[1], str(self._options), ' '.join(self.fieldnames)]
ifile, ofile = self._prepare_recording(record_argv, ifile, ofile)
self._record_writer.ofile = ofile
ifile.record(str(self._input_header), '\n\n')
if self.show_configuration:
self.write_info(self.name + ' command configuration: ' + str(self._configuration))
return ifile # wrapped, if self.record is True
def _prepare_recording(self, argv, ifile, ofile):
# Create the recordings directory, if it doesn't already exist
recordings = os.path.join(environment.splunk_home, 'var', 'run', 'splunklib.searchcommands', 'recordings')
if not os.path.isdir(recordings):
os.makedirs(recordings)
# Create input/output recorders from ifile and ofile
recording = os.path.join(recordings, self.__class__.__name__ + '-' + repr(time()) + '.' + self._metadata.action)
ifile = Recorder(recording + '.input', ifile)
ofile = Recorder(recording + '.output', ofile)
# Archive the dispatch directory--if it exists--so that it can be used as a baseline in mocks)
dispatch_dir = self._metadata.searchinfo.dispatch_dir
if dispatch_dir is not None: # __GETINFO__ action does not include a dispatch_dir
root_dir, base_dir = os.path.split(dispatch_dir)
make_archive(recording + '.dispatch_dir', 'gztar', root_dir, base_dir, logger=self.logger)
# Save a splunk command line because it is useful for developing tests
with open(recording + '.splunk_cmd', 'wb') as f:
f.write('splunk cmd python '.encode())
f.write(os.path.basename(argv[0]).encode())
for arg in islice(argv, 1, len(argv)):
f.write(' '.encode())
f.write(arg.encode())
return ifile, ofile
def _process_protocol_v1(self, argv, ifile, ofile):
debug = environment.splunklib_logger.debug
class_name = self.__class__.__name__
debug('%s.process started under protocol_version=1', class_name)
self._record_writer = RecordWriterV1(ofile)
# noinspection PyBroadException
try:
if argv[1] == '__GETINFO__':
debug('Writing configuration settings')
ifile = self._prepare_protocol_v1(argv, ifile, ofile)
self._record_writer.write_record(dict(
(n, ','.join(v) if isinstance(v, (list, tuple)) else v) for n, v in self._configuration.iteritems()))
self.finish()
elif argv[1] == '__EXECUTE__':
debug('Executing')
ifile = self._prepare_protocol_v1(argv, ifile, ofile)
self._records = self._records_protocol_v1
self._metadata.action = 'execute'
self._execute(ifile, None)
else:
message = (
'Command {0} appears to be statically configured for search command protocol version 1 and static '
'configuration is unsupported by splunklib.searchcommands. Please ensure that '
'default/commands.conf contains this stanza:\n'
'[{0}]\n'
'filename = {1}\n'
'enableheader = true\n'
'outputheader = true\n'
'requires_srinfo = true\n'
'supports_getinfo = true\n'
'supports_multivalues = true\n'
'supports_rawargs = true'.format(self.name, os.path.basename(argv[0])))
raise RuntimeError(message)
except (SyntaxError, ValueError) as error:
self.write_error(unicode(error))
self.flush()
exit(0)
except SystemExit:
self.flush()
raise
except:
self._report_unexpected_error()
self.flush()
exit(1)
debug('%s.process finished under protocol_version=1', class_name)
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
:param ifile: Input file object.
:type ifile: file or InputType
:param ofile: Output file object.
:type ofile: file or OutputType
:return: :const:`None`
"""
debug = environment.splunklib_logger.debug
class_name = self.__class__.__name__
debug('%s.process started under protocol_version=2', class_name)
self._protocol_version = 2
# Read search command metadata from splunkd
# noinspection PyBroadException
try:
debug('Reading metadata')
metadata, body = self._read_chunk(ifile)
action = getattr(metadata, 'action', None)
if action != 'getinfo':
raise RuntimeError('Expected getinfo action, not {}'.format(action))
if len(body) > 0:
raise RuntimeError('Did not expect data for getinfo action')
self._metadata = deepcopy(metadata)
searchinfo = self._metadata.searchinfo
searchinfo.earliest_time = float(searchinfo.earliest_time)
searchinfo.latest_time = float(searchinfo.latest_time)
searchinfo.search = unquote(searchinfo.search)
self._map_input_header()
debug(' metadata=%r, input_header=%r', self._metadata, self._input_header)
try:
tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
except AttributeError:
raise RuntimeError('%s.metadata.searchinfo.dispatch_dir is undefined'.format(class_name))
debug(' tempfile.tempdir=%r', tempfile.tempdir)
except:
self._record_writer = RecordWriterV2(ofile)
self._report_unexpected_error()
self.finish()
exit(1)
# Write search command configuration for consumption by splunkd
# noinspection PyBroadException
try:
self._record_writer = RecordWriterV2(ofile, getattr(self._metadata, 'maxresultrows', None))
self.fieldnames = []
self.options.reset()
args = self.metadata.searchinfo.args
error_count = 0
debug('Parsing arguments')
if args and type(args) == list:
for arg in args:
result = arg.split('=', 1)
if len(result) == 1:
self.fieldnames.append(result[0])
else:
name, value = result
try:
option = self.options[name]
except KeyError:
self.write_error('Unrecognized option: {}={}'.format(name, value))
error_count += 1
continue
try:
option.value = value
except ValueError:
self.write_error('Illegal value: {}={}'.format(name, value))
error_count += 1
continue
missing = self.options.get_missing()
if missing is not None:
if len(missing) == 1:
self.write_error('A value for "{}" is required'.format(missing[0]))
else:
self.write_error('Values for these required options are missing: {}'.format(', '.join(missing)))
error_count += 1
if error_count > 0:
exit(1)
debug(' command: %s', unicode(self))
debug('Preparing for execution')
self.prepare()
if self.record:
ifile, ofile = self._prepare_recording(argv, ifile, ofile)
self._record_writer.ofile = ofile
# Record the metadata that initiated this command after removing the record option from args/raw_args
info = self._metadata.searchinfo
for attr in 'args', 'raw_args':
setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')])
metadata = MetadataEncoder().encode(self._metadata)
ifile.record('chunked 1.0,', unicode(len(metadata)), ',0\n', metadata)
if self.show_configuration:
self.write_info(self.name + ' command configuration: ' + str(self._configuration))
debug(' command configuration: %s', self._configuration)
except SystemExit:
self._record_writer.write_metadata(self._configuration)
self.finish()
raise
except:
self._record_writer.write_metadata(self._configuration)
self._report_unexpected_error()
self.finish()
exit(1)
self._record_writer.write_metadata(self._configuration)
# Execute search command on data passing through the pipeline
# noinspection PyBroadException
try:
debug('Executing under protocol_version=2')
self._records = self._records_protocol_v2
self._metadata.action = 'execute'
self._execute(ifile, None)
except SystemExit:
self.finish()
raise
except:
self._report_unexpected_error()
self.finish()
exit(1)
debug('%s.process completed', class_name)
    def write_debug(self, message, *args):
        # Emits a DEBUG-severity message through the record writer.
        self._record_writer.write_message('DEBUG', message, *args)
    def write_error(self, message, *args):
        # Emits an ERROR-severity message; shown to the search user.
        self._record_writer.write_message('ERROR', message, *args)
    def write_fatal(self, message, *args):
        # Emits a FATAL-severity message.
        self._record_writer.write_message('FATAL', message, *args)
    def write_info(self, message, *args):
        # Emits an INFO-severity message.
        self._record_writer.write_message('INFO', message, *args)
    def write_warning(self, message, *args):
        # Emits a WARN-severity message.
        self._record_writer.write_message('WARN', message, *args)
def write_metric(self, name, value):
""" Writes a metric that will be added to the search inspector.
:param name: Name of the metric.
:type name: basestring
:param value: A 4-tuple containing the value of metric :param:`name` where
value[0] = Elapsed seconds or :const:`None`.
value[1] = Number of invocations or :const:`None`.
value[2] = Input count or :const:`None`.
value[3] = Output count or :const:`None`.
The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
:return: :const:`None`.
"""
self._record_writer.write_metric(name, value)
# P2 [ ] TODO: Support custom inspector values
    @staticmethod
    def _decode_list(mv):
        # Decodes a Splunk multivalue-encoded string ('$v1$;$v2$;...') into a
        # list of values, un-escaping '$$' back to '$'.
        return [match.replace('$$', '$') for match in SearchCommand._encoded_value.findall(mv)]
    _encoded_value = re.compile(r'\$(?P<item>(?:\$\$|[^$])*)\$(?:;|$)')  # matches a single value in an encoded list
def _execute(self, ifile, process):
""" Default processing loop
:param ifile: Input file object.
:type ifile: file
:param process: Bound method to call in processing loop.
:type process: instancemethod
:return: :const:`None`.
:rtype: NoneType
"""
self._record_writer.write_records(process(self._records(ifile)))
self.finish()
@staticmethod
def _read_chunk(ifile):
# noinspection PyBroadException
try:
header = ifile.readline()
except Exception as error:
raise RuntimeError('Failed to read transport header: {}'.format(error))
if not header:
return None
match = SearchCommand._header.match(header)
if match is None:
raise RuntimeError('Failed to parse transport header: {}'.format(header))
metadata_length, body_length = match.groups()
metadata_length = int(metadata_length)
body_length = int(body_length)
try:
metadata = ifile.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
try:
metadata = decoder.decode(metadata)
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
# if body_length <= 0:
# return metadata, ''
try:
body = ifile.read(body_length)
except Exception as error:
raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error))
return metadata, body
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
    def _records_protocol_v1(self, ifile):
        # Generator yielding one OrderedDict per CSV record read under
        # protocol v1. The first CSV row is the header.
        reader = csv.reader(ifile, dialect=CsvDialect)
        try:
            fieldnames = reader.next()  # Python 2 iterator API
        except StopIteration:
            return
        # Map each '__mv_<name>' column back to its plain '<name>' field; these
        # columns carry Splunk's encoded multivalue representation.
        mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
        if len(mv_fieldnames) == 0:
            # Fast path: no multivalue columns to decode
            for values in reader:
                yield OrderedDict(izip(fieldnames, values))
            return
        for values in reader:
            record = OrderedDict()
            for fieldname, value in izip(fieldnames, values):
                if fieldname.startswith('__mv_'):
                    if len(value) > 0:
                        # A non-empty __mv_ column supplies the decoded list for
                        # its base field, overriding the scalar column.
                        record[mv_fieldnames[fieldname]] = self._decode_list(value)
                elif fieldname not in record:
                    record[fieldname] = value
            yield record
def _records_protocol_v2(self, ifile):
while True:
result = self._read_chunk(ifile)
if not result:
return
metadata, body = result
action = getattr(metadata, 'action', None)
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
if len(body) > 0:
reader = csv.reader(StringIO(body), dialect=CsvDialect)
try:
fieldnames = reader.next()
except StopIteration:
return
mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
if len(mv_fieldnames) == 0:
for values in reader:
yield OrderedDict(izip(fieldnames, values))
else:
for values in reader:
record = OrderedDict()
for fieldname, value in izip(fieldnames, values):
if fieldname.startswith('__mv_'):
if len(value) > 0:
record[mv_fieldnames[fieldname]] = self._decode_list(value)
elif fieldname not in record:
record[fieldname] = value
yield record
if finished:
return
self.flush()
def _report_unexpected_error(self):
error_type, error, tb = sys.exc_info()
origin = tb
while origin.tb_next is not None:
origin = origin.tb_next
filename = origin.tb_frame.f_code.co_filename
lineno = origin.tb_lineno
message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error)
environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb)))
self.write_error(message)
# endregion
# region Types
class ConfigurationSettings(object):
""" Represents the configuration settings common to all :class:`SearchCommand` classes.
"""
def __init__(self, command):
self.command = command
def __repr__(self):
""" Converts the value of this instance to its string representation.
The value of this ConfigurationSettings instance is represented as a string of comma-separated
:code:`(name, value)` pairs.
:return: String representation of this instance
"""
definitions = type(self).configuration_setting_definitions
settings = imap(
lambda setting: repr((setting.name, setting.__get__(self), setting.supporting_protocols)), definitions)
return '[' + ', '.join(settings) + ']'
def __str__(self):
""" Converts the value of this instance to its string representation.
The value of this ConfigurationSettings instance is represented as a string of comma-separated
:code:`name=value` pairs. Items with values of :const:`None` are filtered from the list.
:return: String representation of this instance
"""
text = ', '.join(imap(lambda (name, value): name + '=' + json_encode_string(unicode(value)), self.iteritems()))
return text
# region Methods
@classmethod
def fix_up(cls, command_class):
""" Adjusts and checks this class and its search command class.
Derived classes typically override this method. It is used by the :decorator:`Configuration` decorator to
fix up the :class:`SearchCommand` class it adorns. This method is overridden by :class:`EventingCommand`,
:class:`GeneratingCommand`, :class:`ReportingCommand`, and :class:`StreamingCommand`, the base types for
all other search commands.
:param command_class: Command class targeted by this class
"""
return
def iteritems(self):
definitions = type(self).configuration_setting_definitions
version = self.command.protocol_version
return ifilter(
lambda (name, value): value is not None, imap(
lambda setting: (setting.name, setting.__get__(self)), ifilter(
lambda setting: setting.is_supported_by_protocol(version), definitions)))
pass # endregion
pass # endregion
SearchMetric = namedtuple(b'SearchMetric', (b'elapsed_seconds', b'invocation_count', b'input_count', b'output_count'))
def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
    """ Instantiates and executes a search command class.

    This function implements a conditional script stanza based on the value of
    :code:`module_name`::

        if module_name is None or module_name == '__main__':
            # execute command

    Call this function at module scope with :code:`module_name=__name__` if you
    would like your module to act as either a reusable module or a standalone
    program; for example::

        dispatch(SomeStreamingCommand, module_name=__name__)

    dispatches :code:`SomeStreamingCommand` if and only if :code:`__name__` is
    :code:`'__main__'`. Pass :const:`None` as :code:`module_name` to instantiate
    and execute :code:`command_class` unconditionally.

    :param command_class: Search command class to instantiate and execute.
    :type command_class: type
    :param argv: List of arguments to the command.
    :type argv: list or tuple
    :param input_file: File from which the command will read data.
    :type input_file: :code:`file`
    :param output_file: File to which the command will write data.
    :type output_file: :code:`file`
    :param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
    :type module_name: :code:`basestring`
    :returns: :const:`None`
    """
    assert issubclass(command_class, SearchCommand)

    # Guard clause: an importing module (not '__main__') must not dispatch.
    if module_name is not None and module_name != '__main__':
        return

    command_class().process(argv, input_file, output_file)
| mit |
seankelly/buildbot | master/buildbot/test/fake/state.py | 10 | 1108 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
class State(object):
    """Ad-hoc attribute bag for tracking state throughout a test.

    Assign whatever you want to instance attributes; keyword arguments passed
    to the constructor become the initial attribute values.
    """

    def __init__(self, **kwargs):
        # Each keyword becomes an instance attribute of the same name.
        for name, value in kwargs.items():
            setattr(self, name, value)
| gpl-2.0 |
pymedusa/Medusa | ext/boto/beanstalk/wrapper.py | 153 | 1078 | """Wraps layer1 api methods and converts layer1 dict responses to objects."""
from boto.beanstalk.layer1 import Layer1
import boto.beanstalk.response
from boto.exception import BotoServerError
import boto.beanstalk.exception as exception
def beanstalk_wrapper(func, name):
    """Wraps a Layer1 API method so that boto server errors raise simplified
    exceptions and successful dict responses are converted to response objects.
    """
    # Turn 'this_is_a_function_name' into 'ThisIsAFunctionNameResponse'; the
    # name is fixed at wrap time, so compute the class name once here.
    response_class_name = ''.join(part.capitalize() for part in name.split('_')) + 'Response'

    def _wrapped_low_level_api(*args, **kwargs):
        try:
            raw_response = func(*args, **kwargs)
        except BotoServerError as e:
            raise exception.simple(e)
        response_class = getattr(boto.beanstalk.response, response_class_name)
        return response_class(raw_response)

    return _wrapped_low_level_api
class Layer1Wrapper(object):
    # Thin proxy around boto's Layer1 beanstalk client: every attribute lookup
    # that falls through to the underlying API is wrapped by beanstalk_wrapper,
    # which converts dict responses to response objects and simplifies errors.
    def __init__(self, *args, **kwargs):
        self.api = Layer1(*args, **kwargs)
    def __getattr__(self, name):
        # __getattr__ runs only for names not found on this instance itself;
        # delegate to the Layer1 client, wrapping the bound method on the fly.
        try:
            return beanstalk_wrapper(getattr(self.api, name), name)
        except AttributeError:
            raise AttributeError("%s has no attribute %r" % (self, name))
| gpl-3.0 |
SatoshiNXSimudrone/sl4a-damon-clone | python-modules/pybluez/examples/advanced/inquiry-with-rssi.py | 20 | 5076 | # performs a simple device inquiry, followed by a remote name request of each
# discovered device
import os
import sys
import struct
import _bluetooth as bluez
def printpacket(pkt):
    # Dumps a raw HCI packet to stdout as space-separated hex bytes
    # (Python 2 syntax: trailing bare `print` emits the newline).
    for c in pkt:
        sys.stdout.write("%02x " % struct.unpack("B",c)[0])
    print
def read_inquiry_mode(sock):
    """returns the current inquiry mode, or -1 on failure

    :param sock: an open HCI socket (from bluez.hci_open_dev or similar)
    """
    # save current filter so it can be restored before returning
    old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
    # Setup socket filter to receive only events related to the
    # read_inquiry_mode command
    flt = bluez.hci_filter_new()
    opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
            bluez.OCF_READ_INQUIRY_MODE)
    bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
    bluez.hci_filter_set_opcode(flt, opcode)
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
    # first read the current inquiry mode.
    bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
            bluez.OCF_READ_INQUIRY_MODE )
    pkt = sock.recv(255)
    # CMD_COMPLETE event: skip 6 header bytes, then status and mode bytes
    status,mode = struct.unpack("xxxxxxBB", pkt)
    if status != 0: mode = -1
    # restore old filter
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
    return mode
def write_inquiry_mode(sock, mode):
"""returns 0 on success, -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# write_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# send the command!
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE, struct.pack("B", mode) )
pkt = sock.recv(255)
status = struct.unpack("xxxxxxB", pkt)[0]
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
if status != 0: return -1
return 0
def device_inquiry_with_with_rssi(sock):
    """Run an HCI inquiry and collect (bdaddr, rssi) pairs.

    Returns a list of (address, rssi) tuples; devices reported through a
    plain (non-RSSI) inquiry-result event get an RSSI of -1.
    """
    # save current filter
    old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
    # perform a device inquiry on bluetooth device #0
    # The inquiry lasts duration * 1.28 seconds (duration=4 -> ~5.12 s);
    # before the inquiry is performed, bluez should flush its cache of
    # previously discovered devices
    flt = bluez.hci_filter_new()
    bluez.hci_filter_all_events(flt)
    bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
    duration = 4
    max_responses = 255
    # 0x33 0x8b 0x9e is the General Inquiry Access Code (GIAC), little-endian.
    cmd_pkt = struct.pack("BBBBB", 0x33, 0x8b, 0x9e, duration, max_responses)
    bluez.hci_send_cmd(sock, bluez.OGF_LINK_CTL, bluez.OCF_INQUIRY, cmd_pkt)
    results = []
    done = False
    while not done:
        pkt = sock.recv(255)
        ptype, event, plen = struct.unpack("BBB", pkt[:3])
        if event == bluez.EVT_INQUIRY_RESULT_WITH_RSSI:
            pkt = pkt[3:]
            # first payload byte is the number of responses in this event
            nrsp = struct.unpack("B", pkt[0])[0]
            for i in range(nrsp):
                # 6-byte bdaddr of response i starts right after the count
                addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
                # signed 8-bit RSSI follows the 13-byte fixed fields of all
                # nrsp responses -- TODO confirm offset against HCI spec
                rssi = struct.unpack("b", pkt[1+13*nrsp+i])[0]
                results.append( ( addr, rssi ) )
                print "[%s] RSSI: [%d]" % (addr, rssi)
        elif event == bluez.EVT_INQUIRY_COMPLETE:
            done = True
        elif event == bluez.EVT_CMD_STATUS:
            status, ncmd, opcode = struct.unpack("BBH", pkt[3:7])
            if status != 0:
                # non-zero status means the inquiry command was rejected
                print "uh oh..."
                printpacket(pkt[3:7])
                done = True
        elif event == bluez.EVT_INQUIRY_RESULT:
            # legacy inquiry result event: no RSSI field available
            pkt = pkt[3:]
            nrsp = struct.unpack("B", pkt[0])[0]
            for i in range(nrsp):
                addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
                results.append( ( addr, -1 ) )
                print "[%s] (no RRSI)" % addr
        else:
            print "unrecognized packet type 0x%02x" % ptype
            print "event ", event
    # restore old filter
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
    return results
# --- script entry point: requires a local Bluetooth adapter and typically
# --- root privileges to change the inquiry mode.
dev_id = 0
try:
    sock = bluez.hci_open_dev(dev_id)
except:
    print "error accessing bluetooth device..."
    sys.exit(1)
try:
    # inquiry-with-RSSI needs inquiry mode 1 (Bluetooth 1.2+)
    mode = read_inquiry_mode(sock)
except Exception, e:
    print "error reading inquiry mode.  "
    print "Are you sure this a bluetooth 1.2 device?"
    print e
    sys.exit(1)
print "current inquiry mode is %d" % mode
if mode != 1:
    # switch the adapter to inquiry-result-with-RSSI mode
    print "writing inquiry mode..."
    try:
        result = write_inquiry_mode(sock, 1)
    except Exception, e:
        print "error writing inquiry mode.  Are you sure you're root?"
        print e
        sys.exit(1)
    if result != 0:
        print "error while setting inquiry mode"
    print "result: %d" % result
device_inquiry_with_with_rssi(sock)
| apache-2.0 |
havt/odoo | openerp/tools/convert.py | 205 | 41282 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cStringIO
import csv
import logging
import os.path
import pickle
import re
import sys
# for eval context:
import time
import openerp
import openerp.release
import openerp.workflow
from yaml_import import convert_yaml_import
import assertion_report
_logger = logging.getLogger(__name__)

try:
    import pytz
except ImportError:
    # pytz is optional: warn and fall back to a stub exposing the one
    # attribute (all_timezones) that this module's eval contexts rely on.
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
    # unrelated errors are not silently swallowed.
    _logger.warning('could not find pytz library, please install it')
    class pytzclass(object):
        all_timezones = []
    pytz = pytzclass()
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from lxml import etree, builder
import misc
from config import config
from translate import _
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from misc import SKIPPED_ELEMENT_TYPES
from misc import unquote
from openerp import SUPERUSER_ID
# Import of XML records requires the unsafe eval as well,
# almost everywhere, which is ok because it supposedly comes
# from trusted data, but at least we make it obvious now.
unsafe_eval = eval
from safe_eval import safe_eval as eval
class ParseError(Exception):
    """Raised when a data file cannot be processed.

    Carries the offending text together with its file name and line number
    so ``__str__`` can render a human-readable report.
    """

    def __init__(self, msg, text, filename, lineno):
        self.msg, self.text = msg, text
        self.filename, self.lineno = filename, lineno

    def __str__(self):
        return '"%s" while parsing %s:%s, near\n%s' % (
            self.msg, self.filename, self.lineno, self.text)
def _ref(self, cr):
return lambda x: self.id_get(cr, x)
def _obj(pool, cr, uid, model_str, context=None):
model = pool[model_str]
return lambda x: model.browse(cr, uid, x, context=context)
def _get_idref(self, cr, uid, model_str, context, idref):
    """Build the evaluation namespace used by eval="" attributes.

    Starts from a copy of the caller-provided `idref` mapping and layers
    in the date/time helpers, the server version, the ref() resolver and
    pytz; when a model is known, an obj() browser for it is added too.
    """
    idref2 = dict(idref)
    idref2.update(
        time=time,
        DateTime=datetime,
        datetime=datetime,
        timedelta=timedelta,
        relativedelta=relativedelta,
        version=openerp.release.major_version,
        ref=_ref(self, cr),
        pytz=pytz,
    )
    if len(model_str):
        idref2['obj'] = _obj(self.pool, cr, uid, model_str, context=context)
    return idref2
def _fix_multiple_roots(node):
    """
    Surround the children of the ``node`` element of an XML field with a
    single root "data" element, to prevent having a document with multiple
    roots once parsed separately.

    XML nodes should have one root only, but we'd like to support
    direct multiple roots in our partial documents (like inherited view architectures).
    As a convention we'll surround multiple root with a container "data" element, to be
    ignored later when parsing.
    """
    real_nodes = [x for x in node if not isinstance(x, SKIPPED_ELEMENT_TYPES)]
    if len(real_nodes) > 1:
        data_node = etree.Element("data")
        # lxml's append() *moves* an element to its new parent, so iterate
        # over a snapshot: mutating `node` while iterating it directly can
        # skip children.
        for child in list(node):
            data_node.append(child)
        node.append(data_node)
def _eval_xml(self, node, pool, cr, uid, idref, context=None):
    """Evaluate a <field>/<value>/<function>/<test> element to a Python value.

    Behaviour depends on the element's @type ('char' by default): search
    expressions, eval expressions, inline xml/html, file references, and the
    scalar/list conversions are all handled here. Returns None for
    unrecognized tags.
    """
    if context is None:
        context = {}
    if node.tag in ('field','value'):
        t = node.get('type','char')
        f_model = node.get('model', '').encode('utf-8')
        if node.get('search'):
            # @search: evaluate a domain and use the matched records' @use
            # field (default 'id') as the value.
            f_search = node.get("search",'').encode('utf-8')
            f_use = node.get("use",'id').encode('utf-8')
            f_name = node.get("name",'').encode('utf-8')
            idref2 = {}
            if f_search:
                idref2 = _get_idref(self, cr, uid, f_model, context, idref)
            q = unsafe_eval(f_search, idref2)
            ids = pool[f_model].search(cr, uid, q)
            if f_use != 'id':
                ids = map(lambda x: x[f_use], pool[f_model].read(cr, uid, ids, [f_use]))
            _cols = pool[f_model]._columns
            # many2many target fields receive the whole id list
            if (f_name in _cols) and _cols[f_name]._type=='many2many':
                return ids
            f_val = False
            if len(ids):
                f_val = ids[0]
                if isinstance(f_val, tuple):
                    f_val = f_val[0]
            return f_val
        a_eval = node.get('eval','')
        if a_eval:
            idref2 = _get_idref(self, cr, uid, f_model, context, idref)
            try:
                return unsafe_eval(a_eval, idref2)
            except Exception:
                logging.getLogger('openerp.tools.convert.init').error(
                    'Could not eval(%s) for %s in %s', a_eval, node.get('name'), context)
                raise
        def _process(s, idref):
            # Substitute %(xml_id)s/%(xml_id)d placeholders with database ids,
            # resolving (and caching) each xml id at most once.
            matches = re.finditer('[^%]%\((.*?)\)[ds]', s)
            done = []
            for m in matches:
                found = m.group()[1:]
                if found in done:
                    continue
                done.append(found)
                id = m.groups()[0]
                if not id in idref:
                    idref[id] = self.id_get(cr, id)
                s = s.replace(found, str(idref[id]))
            s = s.replace('%%', '%') # Quite wierd but it's for (somewhat) backward compatibility sake
            return s
        if t == 'xml':
            _fix_multiple_roots(node)
            return '<?xml version="1.0"?>\n'\
                +_process("".join([etree.tostring(n, encoding='utf-8')
                                   for n in node]), idref)
        if t == 'html':
            return _process("".join([etree.tostring(n, encoding='utf-8')
                                   for n in node]), idref)
        data = node.text
        if node.get('file'):
            with openerp.tools.file_open(node.get('file'), 'rb') as f:
                data = f.read()
        if t == 'file':
            from ..modules import module
            path = data.strip()
            if not module.get_module_resource(self.module, path):
                raise IOError("No such file or directory: '%s' in %s" % (
                    path, self.module))
            return '%s,%s' % (self.module, path)
        if t == 'char':
            return data
        if t == 'base64':
            # Python-2 str codec: encodes the raw bytes as base64 text
            return data.encode('base64')
        if t == 'int':
            d = data.strip()
            if d == 'None':
                return None
            return int(d)
        if t == 'float':
            return float(data.strip())
        if t in ('list','tuple'):
            # recurse on child <value> elements
            res=[]
            for n in node.iterchildren(tag='value'):
                res.append(_eval_xml(self,n,pool,cr,uid,idref))
            if t=='tuple':
                return tuple(res)
            return res
    elif node.tag == "function":
        # Call model.method(cr, uid, *args): args come from @eval and/or
        # the evaluated child elements.
        args = []
        a_eval = node.get('eval','')
        # FIXME: should probably be exclusive
        if a_eval:
            idref['ref'] = lambda x: self.id_get(cr, x)
            args = unsafe_eval(a_eval, idref)
        for n in node:
            return_val = _eval_xml(self,n, pool, cr, uid, idref, context)
            if return_val is not None:
                args.append(return_val)
        model = pool[node.get('model', '')]
        method = node.get('name')
        res = getattr(model, method)(cr, uid, *args)
        return res
    elif node.tag == "test":
        return node.text
# Splits menu paths on '/' characters that are NOT preceded by a backslash,
# so "\/" can be used to embed a literal slash inside a menu name.
escape_re = re.compile(r'(?<!\\)/')

def escape(x):
    """Turn the "\\/" escape sequence back into a plain '/'."""
    return '/'.join(x.split('\\/'))
class xml_import(object):
    """Importer for OpenERP XML data files.

    ``parse()`` walks every ``<data>`` section of an ``<openerp>`` document
    and dispatches each child element to the matching ``_tag_*`` handler
    registered in ``self._tags`` (record, delete, menuitem, template, ...).
    Per-file state -- module name, cursor, xml-id cache (``idref``) and
    update mode -- lives on the instance.
    """
    @staticmethod
    def nodeattr2bool(node, attr, default=False):
        """Interpret attribute `attr` of `node` as a boolean.

        Missing or blank attributes yield `default`; '0', 'false' and
        'off' (any case) are False, anything else is True.
        """
        if not node.get(attr):
            return default
        val = node.get(attr).strip()
        if not val:
            return default
        return val.lower() not in ('0', 'false', 'off')
    def isnoupdate(self, data_node=None):
        """True when records must not be overwritten on module update."""
        return self.noupdate or (len(data_node) and self.nodeattr2bool(data_node, 'noupdate', False))
    def get_context(self, data_node, node, eval_dict):
        """Evaluate and merge the @context of the <data> node and of `node`."""
        data_node_context = (len(data_node) and data_node.get('context','').encode('utf8'))
        node_context = node.get("context",'').encode('utf8')
        context = {}
        for ctx in (data_node_context, node_context):
            if ctx:
                try:
                    ctx_res = unsafe_eval(ctx, eval_dict)
                    if isinstance(context, dict):
                        context.update(ctx_res)
                    else:
                        context = ctx_res
                except NameError:
                    # Some contexts contain references that are only valid at runtime at
                    # client-side, so in that case we keep the original context string
                    # as it is. We also log it, just in case.
                    context = ctx
                    _logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
                        'at server-side, keeping original string, in case it\'s meant for client side only',
                        ctx, node.get('id','n/a'), exc_info=True)
        return context
    def get_uid(self, cr, uid, data_node, node):
        """Resolve @uid (on `node` or the <data> node) to a user id; default `uid`."""
        node_uid = node.get('uid','') or (len(data_node) and data_node.get('uid',''))
        if node_uid:
            return self.id_get(cr, node_uid)
        return uid
    def _test_xml_id(self, xml_id):
        """Sanity-check an xml id: at most one dot, installed module prefix, length."""
        id = xml_id
        if '.' in xml_id:
            module, id = xml_id.split('.', 1)
            assert '.' not in id, """The ID reference "%s" must contain
maximum one dot. They are used to refer to other modules ID, in the
form: module.record_id""" % (xml_id,)
            if module != self.module:
                modcnt = self.pool['ir.module.module'].search_count(self.cr, self.uid, ['&', ('name', '=', module), ('state', 'in', ['installed'])])
                assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
        if len(id) > 64:
            _logger.error('id: %s is to long (max: 64)', id)
    def _tag_delete(self, cr, rec, data_node=None, mode=None):
        """Handle <delete>: unlink records matched by @search and/or @id."""
        d_model = rec.get("model")
        d_search = rec.get("search",'').encode('utf-8')
        d_id = rec.get("id")
        ids = []
        if d_search:
            idref = _get_idref(self, cr, self.uid, d_model, context={}, idref={})
            try:
                ids = self.pool[d_model].search(cr, self.uid, unsafe_eval(d_search, idref))
            except ValueError:
                _logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
                pass
        if d_id:
            try:
                ids.append(self.id_get(cr, d_id))
            except ValueError:
                # d_id cannot be found. doesn't matter in this case
                _logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)
                pass
        if ids:
            self.pool[d_model].unlink(cr, self.uid, ids)
    def _remove_ir_values(self, cr, name, value, model):
        """Drop ir.values rows matching (name, value, model); always returns True."""
        ir_values_obj = self.pool['ir.values']
        ir_value_ids = ir_values_obj.search(cr, self.uid, [('name','=',name),('value','=',value),('model','=',model)])
        if ir_value_ids:
            ir_values_obj.unlink(cr, self.uid, ir_value_ids)
        return True
    def _tag_report(self, cr, rec, data_node=None, mode=None):
        """Handle <report>: create/update an ir.actions.report.xml record
        and optionally bind it to the contextual print menu."""
        res = {}
        for dest,f in (('name','string'),('model','model'),('report_name','name')):
            res[dest] = rec.get(f,'').encode('utf8')
            assert res[dest], "Attribute %s of report is empty !" % (f,)
        for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),
                           ('attachment','attachment'),('attachment_use','attachment_use'), ('usage','usage'),
                           ('report_type', 'report_type'), ('parser', 'parser')):
            if rec.get(field):
                res[dest] = rec.get(field).encode('utf8')
        if rec.get('auto'):
            res['auto'] = eval(rec.get('auto','False'))
        if rec.get('sxw'):
            sxw_content = misc.file_open(rec.get('sxw')).read()
            res['report_sxw_content'] = sxw_content
        if rec.get('header'):
            res['header'] = eval(rec.get('header','False'))
        res['multi'] = rec.get('multi') and eval(rec.get('multi','False'))
        xml_id = rec.get('id','').encode('utf8')
        self._test_xml_id(xml_id)
        # (4, id) links a group, (3, id) unlinks it (for '-' prefixed names)
        if rec.get('groups'):
            g_names = rec.get('groups','').split(',')
            groups_value = []
            for group in g_names:
                if group.startswith('-'):
                    group_id = self.id_get(cr, group[1:])
                    groups_value.append((3, group_id))
                else:
                    group_id = self.id_get(cr, group)
                    groups_value.append((4, group_id))
            res['groups_id'] = groups_value
        id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
        self.idref[xml_id] = int(id)
        if not rec.get('menu') or eval(rec.get('menu','False')):
            keyword = str(rec.get('keyword', 'client_print_multi'))
            value = 'ir.actions.report.xml,'+str(id)
            replace = rec.get('replace', True)
            self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
        elif self.mode=='update' and eval(rec.get('menu','False'))==False:
            # Special check for report having attribute menu=False on update
            value = 'ir.actions.report.xml,'+str(id)
            self._remove_ir_values(cr, res['name'], value, res['model'])
        return id
    def _tag_function(self, cr, rec, data_node=None, mode=None):
        """Handle <function>: call a model method for its side effects."""
        if self.isnoupdate(data_node) and self.mode != 'init':
            return
        context = self.get_context(data_node, rec, {'ref': _ref(self, cr)})
        uid = self.get_uid(cr, self.uid, data_node, rec)
        _eval_xml(self,rec, self.pool, cr, uid, self.idref, context=context)
        return
    def _tag_url(self, cr, rec, data_node=None, mode=None):
        """Handle <url>: create/update an ir.actions.act_url record."""
        url = rec.get("url",'').encode('utf8')
        target = rec.get("target",'').encode('utf8')
        name = rec.get("name",'').encode('utf8')
        xml_id = rec.get('id','').encode('utf8')
        self._test_xml_id(xml_id)
        res = {'name': name, 'url': url, 'target':target}
        id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.act_url", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
        self.idref[xml_id] = int(id)
    def _tag_act_window(self, cr, rec, data_node=None, mode=None):
        """Handle <act_window>: create/update an ir.actions.act_window record."""
        name = rec.get('name','').encode('utf-8')
        xml_id = rec.get('id','').encode('utf8')
        self._test_xml_id(xml_id)
        type = rec.get('type','').encode('utf-8') or 'ir.actions.act_window'
        view_id = False
        if rec.get('view_id'):
            view_id = self.id_get(cr, rec.get('view_id','').encode('utf-8'))
        domain = rec.get('domain','').encode('utf-8') or '[]'
        res_model = rec.get('res_model','').encode('utf-8')
        src_model = rec.get('src_model','').encode('utf-8')
        view_type = rec.get('view_type','').encode('utf-8') or 'form'
        view_mode = rec.get('view_mode','').encode('utf-8') or 'tree,form'
        usage = rec.get('usage','').encode('utf-8')
        limit = rec.get('limit','').encode('utf-8')
        auto_refresh = rec.get('auto_refresh','').encode('utf-8')
        uid = self.uid
        # Act_window's 'domain' and 'context' contain mostly literals
        # but they can also refer to the variables provided below
        # in eval_context, so we need to eval() them before storing.
        # Among the context variables, 'active_id' refers to
        # the currently selected items in a list view, and only
        # takes meaning at runtime on the client side. For this
        # reason it must remain a bare variable in domain and context,
        # even after eval() at server-side. We use the special 'unquote'
        # class to achieve this effect: a string which has itself, unquoted,
        # as representation.
        active_id = unquote("active_id")
        active_ids = unquote("active_ids")
        active_model = unquote("active_model")
        def ref(str_id):
            return self.id_get(cr, str_id)
        # Include all locals() in eval_context, for backwards compatibility
        eval_context = {
            'name': name,
            'xml_id': xml_id,
            'type': type,
            'view_id': view_id,
            'domain': domain,
            'res_model': res_model,
            'src_model': src_model,
            'view_type': view_type,
            'view_mode': view_mode,
            'usage': usage,
            'limit': limit,
            'auto_refresh': auto_refresh,
            'uid' : uid,
            'active_id': active_id,
            'active_ids': active_ids,
            'active_model': active_model,
            'ref' : ref,
        }
        context = self.get_context(data_node, rec, eval_context)
        try:
            domain = unsafe_eval(domain, eval_context)
        except NameError:
            # Some domains contain references that are only valid at runtime at
            # client-side, so in that case we keep the original domain string
            # as it is. We also log it, just in case.
            _logger.debug('Domain value (%s) for element with id "%s" does not parse '\
                'at server-side, keeping original string, in case it\'s meant for client side only',
                domain, xml_id or 'n/a', exc_info=True)
        res = {
            'name': name,
            'type': type,
            'view_id': view_id,
            'domain': domain,
            'context': context,
            'res_model': res_model,
            'src_model': src_model,
            'view_type': view_type,
            'view_mode': view_mode,
            'usage': usage,
            'limit': limit,
            'auto_refresh': auto_refresh,
        }
        if rec.get('groups'):
            g_names = rec.get('groups','').split(',')
            groups_value = []
            for group in g_names:
                if group.startswith('-'):
                    group_id = self.id_get(cr, group[1:])
                    groups_value.append((3, group_id))
                else:
                    group_id = self.id_get(cr, group)
                    groups_value.append((4, group_id))
            res['groups_id'] = groups_value
        if rec.get('target'):
            res['target'] = rec.get('target','')
        if rec.get('multi'):
            res['multi'] = eval(rec.get('multi', 'False'))
        id = self.pool['ir.model.data']._update(cr, self.uid, 'ir.actions.act_window', self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
        self.idref[xml_id] = int(id)
        if src_model:
            #keyword = 'client_action_relate'
            keyword = rec.get('key2','').encode('utf-8') or 'client_action_relate'
            value = 'ir.actions.act_window,'+str(id)
            replace = rec.get('replace','') or True
            self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, xml_id, [src_model], value, replace=replace, isobject=True, xml_id=xml_id)
        # TODO add remove ir.model.data
    def _tag_ir_set(self, cr, rec, data_node=None, mode=None):
        """Handle <ir_set>: register an ir.values entry (init mode only)."""
        if self.mode != 'init':
            return
        res = {}
        for field in rec.findall('./field'):
            f_name = field.get("name",'').encode('utf-8')
            f_val = _eval_xml(self,field,self.pool, cr, self.uid, self.idref)
            res[f_name] = f_val
        self.pool['ir.model.data'].ir_set(cr, self.uid, res['key'], res['key2'], res['name'], res['models'], res['value'], replace=res.get('replace',True), isobject=res.get('isobject', False), meta=res.get('meta',None))
    def _tag_workflow(self, cr, rec, data_node=None, mode=None):
        """Handle <workflow>: fire a workflow signal on one record."""
        if self.isnoupdate(data_node) and self.mode != 'init':
            return
        model = rec.get('model').encode('ascii')
        w_ref = rec.get('ref')
        if w_ref:
            id = self.id_get(cr, w_ref)
        else:
            # without @ref, the target record id comes from a single child node
            number_children = len(rec)
            assert number_children > 0,\
                'You must define a child node if you dont give a ref'
            assert number_children == 1,\
                'Only one child node is accepted (%d given)' % number_children
            id = _eval_xml(self, rec[0], self.pool, cr, self.uid, self.idref)
        uid = self.get_uid(cr, self.uid, data_node, rec)
        openerp.workflow.trg_validate(
            uid, model, id, rec.get('action').encode('ascii'), cr)
    #
    # Support two types of notation:
    #   name="Inventory Control/Sending Goods"
    # or
    #   action="action_id"
    #   parent="parent_id"
    #
    def _tag_menuitem(self, cr, rec, data_node=None, mode=None):
        """Handle <menuitem>: create/update an ir.ui.menu record.

        The parent may be given explicitly via @parent, or implicitly as a
        "A/B/C" path in @name (missing intermediate menus are created).
        """
        rec_id = rec.get("id",'').encode('ascii')
        self._test_xml_id(rec_id)
        # split the name on unescaped '/' ("\/" embeds a literal slash)
        m_l = map(escape, escape_re.split(rec.get("name",'').encode('utf8')))
        values = {'parent_id': False}
        if rec.get('parent', False) is False and len(m_l) > 1:
            # No parent attribute specified and the menu name has several menu components,
            # try to determine the ID of the parent according to menu path
            pid = False
            res = None
            values['name'] = m_l[-1]
            m_l = m_l[:-1] # last part is our name, not a parent
            for idx, menu_elem in enumerate(m_l):
                if pid:
                    cr.execute('select id from ir_ui_menu where parent_id=%s and name=%s', (pid, menu_elem))
                else:
                    cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (menu_elem,))
                res = cr.fetchone()
                if res:
                    pid = res[0]
                else:
                    # the menuitem does't exist but we are in branch (not a leaf)
                    _logger.warning('Warning no ID for submenu %s of menu %s !', menu_elem, str(m_l))
                    pid = self.pool['ir.ui.menu'].create(cr, self.uid, {'parent_id' : pid, 'name' : menu_elem})
            values['parent_id'] = pid
        else:
            # The parent attribute was specified, if non-empty determine its ID, otherwise
            # explicitly make a top-level menu
            if rec.get('parent'):
                menu_parent_id = self.id_get(cr, rec.get('parent',''))
            else:
                # we get here with <menuitem parent="">, explicit clear of parent, or
                # if no parent attribute at all but menu name is not a menu path
                menu_parent_id = False
            values = {'parent_id': menu_parent_id}
            if rec.get('name'):
                values['name'] = rec.get('name')
            try:
                res = [ self.id_get(cr, rec.get('id','')) ]
            except:
                res = None
        if rec.get('action'):
            a_action = rec.get('action','').encode('utf8')
            # determine the type of action
            action_type, action_id = self.model_id_get(cr, a_action)
            action_type = action_type.split('.')[-1] # keep only type part
            if not values.get('name') and action_type in ('act_window', 'wizard', 'url', 'client', 'server'):
                # borrow the action's name as the menu label
                a_table = 'ir_act_%s' % action_type.replace('act_', '')
                cr.execute('select name from "%s" where id=%%s' % a_table, (int(action_id),))
                resw = cr.fetchone()
                if resw:
                    values['name'] = resw[0]
        if not values.get('name'):
            # ensure menu has a name
            values['name'] = rec_id or '?'
        if rec.get('sequence'):
            values['sequence'] = int(rec.get('sequence'))
        if rec.get('groups'):
            g_names = rec.get('groups','').split(',')
            groups_value = []
            for group in g_names:
                if group.startswith('-'):
                    group_id = self.id_get(cr, group[1:])
                    groups_value.append((3, group_id))
                else:
                    group_id = self.id_get(cr, group)
                    groups_value.append((4, group_id))
            values['groups_id'] = groups_value
        pid = self.pool['ir.model.data']._update(cr, self.uid, 'ir.ui.menu', self.module, values, rec_id, noupdate=self.isnoupdate(data_node), mode=self.mode, res_id=res and res[0] or False)
        if rec_id and pid:
            self.idref[rec_id] = int(pid)
        if rec.get('action') and pid:
            # bind the action to the menu entry
            action = "ir.actions.%s,%d" % (action_type, action_id)
            self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=rec_id)
        return 'ir.ui.menu', pid
    def _assert_equals(self, f1, f2, prec=4):
        """Float comparison helper, exposed as `floatEqual` in <assert> tests."""
        return not round(f1 - f2, prec)
    def _tag_assert(self, cr, rec, data_node=None, mode=None):
        """Handle <assert>: evaluate each child <test> expression against the
        matched record(s) and record success/failure in the assertion report."""
        if self.isnoupdate(data_node) and self.mode != 'init':
            return
        rec_model = rec.get("model",'').encode('ascii')
        model = self.pool[rec_model]
        rec_id = rec.get("id",'').encode('ascii')
        self._test_xml_id(rec_id)
        rec_src = rec.get("search",'').encode('utf8')
        rec_src_count = rec.get("count")
        rec_string = rec.get("string",'').encode('utf8') or 'unknown'
        ids = None
        eval_dict = {'ref': _ref(self, cr)}
        context = self.get_context(data_node, rec, eval_dict)
        uid = self.get_uid(cr, self.uid, data_node, rec)
        if rec_id:
            ids = [self.id_get(cr, rec_id)]
        elif rec_src:
            q = unsafe_eval(rec_src, eval_dict)
            ids = self.pool[rec_model].search(cr, uid, q, context=context)
            if rec_src_count:
                count = int(rec_src_count)
                if len(ids) != count:
                    self.assertion_report.record_failure()
                    msg = 'assertion "%s" failed!\n' \
                          ' Incorrect search count:\n' \
                          ' expected count: %d\n' \
                          ' obtained count: %d\n' \
                          % (rec_string, count, len(ids))
                    _logger.error(msg)
                    return
        assert ids is not None,\
            'You must give either an id or a search criteria'
        ref = _ref(self, cr)
        for id in ids:
            brrec = model.browse(cr, uid, id, context)
            # dict subclass that first resolves keys as record fields, so
            # test expressions can use bare field names
            class d(dict):
                def __getitem__(self2, key):
                    if key in brrec:
                        return brrec[key]
                    return dict.__getitem__(self2, key)
            globals_dict = d()
            globals_dict['floatEqual'] = self._assert_equals
            globals_dict['ref'] = ref
            globals_dict['_ref'] = ref
            for test in rec.findall('./test'):
                f_expr = test.get("expr",'').encode('utf-8')
                expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
                expression_value = unsafe_eval(f_expr, globals_dict)
                if expression_value != expected_value: # assertion failed
                    self.assertion_report.record_failure()
                    msg = 'assertion "%s" failed!\n' \
                          ' xmltag: %s\n' \
                          ' expected value: %r\n' \
                          ' obtained value: %r\n' \
                          % (rec_string, etree.tostring(test), expected_value, expression_value)
                    _logger.error(msg)
                    return
        else: # all tests were successful for this assertion tag (no break)
            self.assertion_report.record_success()
    def _tag_record(self, cr, rec, data_node=None, mode=None):
        """Handle <record>: create or update a database record from its
        <field> children; returns (model, id) or None when skipped."""
        rec_model = rec.get("model").encode('ascii')
        model = self.pool[rec_model]
        rec_id = rec.get("id",'').encode('ascii')
        rec_context = rec.get("context", None)
        if rec_context:
            rec_context = unsafe_eval(rec_context)
        self._test_xml_id(rec_id)
        # in update mode, the record won't be updated if the data node explicitely
        # opt-out using @noupdate="1". A second check will be performed in
        # ir.model.data#_update() using the record's ir.model.data `noupdate` field.
        if self.isnoupdate(data_node) and self.mode != 'init':
            # check if the xml record has no id, skip
            if not rec_id:
                return None
            if '.' in rec_id:
                module,rec_id2 = rec_id.split('.')
            else:
                module = self.module
                rec_id2 = rec_id
            id = self.pool['ir.model.data']._update_dummy(cr, self.uid, rec_model, module, rec_id2)
            if id:
                # if the resource already exists, don't update it but store
                # its database id (can be useful)
                self.idref[rec_id] = int(id)
                return None
            elif not self.nodeattr2bool(rec, 'forcecreate', True):
                # if it doesn't exist and we shouldn't create it, skip it
                return None
            # else create it normally
        res = {}
        for field in rec.findall('./field'):
            #TODO: most of this code is duplicated above (in _eval_xml)...
            f_name = field.get("name").encode('utf-8')
            f_ref = field.get("ref",'').encode('utf-8')
            f_search = field.get("search",'').encode('utf-8')
            f_model = field.get("model",'').encode('utf-8')
            if not f_model and f_name in model._fields:
                f_model = model._fields[f_name].comodel_name
            f_use = field.get("use",'').encode('utf-8') or 'id'
            f_val = False
            if f_search:
                q = unsafe_eval(f_search, self.idref)
                assert f_model, 'Define an attribute model="..." in your .XML file !'
                f_obj = self.pool[f_model]
                # browse the objects searched
                s = f_obj.browse(cr, self.uid, f_obj.search(cr, self.uid, q))
                # column definitions of the "local" object
                _fields = self.pool[rec_model]._fields
                # if the current field is many2many
                if (f_name in _fields) and _fields[f_name].type == 'many2many':
                    f_val = [(6, 0, map(lambda x: x[f_use], s))]
                elif len(s):
                    # otherwise (we are probably in a many2one field),
                    # take the first element of the search
                    f_val = s[0][f_use]
            elif f_ref:
                if f_name in model._fields and model._fields[f_name].type == 'reference':
                    val = self.model_id_get(cr, f_ref)
                    f_val = val[0] + ',' + str(val[1])
                else:
                    f_val = self.id_get(cr, f_ref)
            else:
                f_val = _eval_xml(self,field, self.pool, cr, self.uid, self.idref)
                if f_name in model._fields:
                    if model._fields[f_name].type == 'integer':
                        f_val = int(f_val)
            res[f_name] = f_val
        id = self.pool['ir.model.data']._update(cr, self.uid, rec_model, self.module, res, rec_id or False, not self.isnoupdate(data_node), noupdate=self.isnoupdate(data_node), mode=self.mode, context=rec_context )
        if rec_id:
            self.idref[rec_id] = int(id)
        if config.get('import_partial'):
            cr.commit()
        return rec_model, id
    def _tag_template(self, cr, el, data_node=None, mode=None):
        """Handle <template>: rewrite the element as an ir.ui.view <record>
        of type qweb and forward it to _tag_record."""
        # This helper transforms a <template> element into a <record> and forwards it
        tpl_id = el.get('id', el.get('t-name', '')).encode('ascii')
        full_tpl_id = tpl_id
        if '.' not in full_tpl_id:
            full_tpl_id = '%s.%s' % (self.module, tpl_id)
        # set the full template name for qweb <module>.<id>
        if not el.get('inherit_id'):
            el.set('t-name', full_tpl_id)
            el.tag = 't'
        else:
            el.tag = 'data'
        el.attrib.pop('id', None)
        record_attrs = {
            'id': tpl_id,
            'model': 'ir.ui.view',
        }
        for att in ['forcecreate', 'context']:
            if att in el.keys():
                record_attrs[att] = el.attrib.pop(att)
        Field = builder.E.field
        name = el.get('name', tpl_id)
        record = etree.Element('record', attrib=record_attrs)
        record.append(Field(name, name='name'))
        record.append(Field("qweb", name='type'))
        record.append(Field(el.get('priority', "16"), name='priority'))
        if 'inherit_id' in el.attrib:
            record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
        if el.get('active') in ("True", "False"):
            view_id = self.id_get(cr, tpl_id, raise_if_not_found=False)
            # on update, don't override 'active' of an existing view
            if mode != "update" or not view_id:
                record.append(Field(name='active', eval=el.get('active')))
        if el.get('customize_show') in ("True", "False"):
            record.append(Field(name='customize_show', eval=el.get('customize_show')))
        groups = el.attrib.pop('groups', None)
        if groups:
            grp_lst = map(lambda x: "ref('%s')" % x, groups.split(','))
            record.append(Field(name="groups_id", eval="[(6, 0, ["+', '.join(grp_lst)+"])]"))
        if el.attrib.pop('page', None) == 'True':
            record.append(Field(name="page", eval="True"))
        if el.get('primary') == 'True':
            # Pseudo clone mode, we'll set the t-name to the full canonical xmlid
            el.append(
                builder.E.xpath(
                    builder.E.attribute(full_tpl_id, name='t-name'),
                    expr=".",
                    position="attributes",
                )
            )
            record.append(Field('primary', name='mode'))
        # inject complete <template> element (after changing node name) into
        # the ``arch`` field
        record.append(Field(el, name="arch", type="xml"))
        return self._tag_record(cr, record, data_node)
    def id_get(self, cr, id_str, raise_if_not_found=True):
        """Resolve an xml id to its database id, using the per-file cache."""
        if id_str in self.idref:
            return self.idref[id_str]
        res = self.model_id_get(cr, id_str, raise_if_not_found)
        if res and len(res)>1: res = res[1]
        return res
    def model_id_get(self, cr, id_str, raise_if_not_found=True):
        """Resolve an xml id to a (model, database id) pair; unqualified ids
        are prefixed with the current module."""
        model_data_obj = self.pool['ir.model.data']
        mod = self.module
        if '.' not in id_str:
            id_str = '%s.%s' % (mod, id_str)
        return model_data_obj.xmlid_to_res_model_res_id(
            cr, self.uid, id_str,
            raise_if_not_found=raise_if_not_found)
    def parse(self, de, mode=None):
        """Walk the <openerp> document and dispatch every recognized tag.

        Any handler exception rolls back the cursor and is re-raised as a
        ParseError carrying the offending element and its source location.
        """
        if de.tag != 'openerp':
            raise Exception("Mismatch xml format: root tag must be `openerp`.")
        for n in de.findall('./data'):
            for rec in n:
                if rec.tag in self._tags:
                    try:
                        self._tags[rec.tag](self.cr, rec, n, mode=mode)
                    except Exception, e:
                        self.cr.rollback()
                        exc_info = sys.exc_info()
                        # Python-2 three-expression raise: keep the original traceback
                        raise ParseError, (misc.ustr(e), etree.tostring(rec).rstrip(), rec.getroottree().docinfo.URL, rec.sourceline), exc_info[2]
        return True
    def __init__(self, cr, module, idref, mode, report=None, noupdate=False):
        self.mode = mode
        self.module = module
        self.cr = cr
        self.idref = idref
        self.pool = openerp.registry(cr.dbname)
        # data files are always processed as the administrator user
        self.uid = 1
        if report is None:
            report = assertion_report.assertion_report()
        self.assertion_report = report
        self.noupdate = noupdate
        # dispatch table: element tag -> handler method
        self._tags = {
            'record': self._tag_record,
            'delete': self._tag_delete,
            'function': self._tag_function,
            'menuitem': self._tag_menuitem,
            'template': self._tag_template,
            'workflow': self._tag_workflow,
            'report': self._tag_report,
            'ir_set': self._tag_ir_set,
            'act_window': self._tag_act_window,
            'url': self._tag_url,
            'assert': self._tag_assert,
        }
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, report=None, pathname=None):
    """Load one data file for a module, choosing the loader by extension.

    ``kind`` is only meaningful for .yml files; .js files are accepted but
    ignored; any other extension is logged and skipped.
    """
    if pathname is None:
        pathname = os.path.join(module, filename)
    stream = misc.file_open(pathname)
    extension = os.path.splitext(filename)[1].lower()
    try:
        if extension == '.csv':
            convert_csv_import(cr, module, pathname, stream.read(), idref, mode, noupdate)
        elif extension == '.sql':
            convert_sql_import(cr, stream)
        elif extension == '.yml':
            convert_yaml_import(cr, module, stream, kind, idref, mode, noupdate, report)
        elif extension == '.xml':
            convert_xml_import(cr, module, stream, idref, mode, noupdate, report)
        elif extension == '.js':
            pass  # .js files are valid but ignored here.
        else:
            _logger.warning("Can't load unknown file type %s.", filename)
    finally:
        stream.close()
def convert_sql_import(cr, fp):
    """Execute every non-empty ';'-separated statement from fp against cr.

    Whitespace (including newlines) inside each statement is collapsed to
    single spaces before execution.
    """
    for raw in fp.read().split(';'):
        statement = ' '.join(raw.split())
        if statement:
            cr.execute(statement)
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
                       noupdate=False):
    '''Import csv file :
    quote: "
    delimiter: ,
    encoding: utf-8

    The target model name is derived from the file name
    (``res.partner-demo.csv`` -> ``res.partner``). When the
    ``import_partial`` option is set, progress is checkpointed to a pickle
    file so an interrupted import can resume where it left off.'''
    if not idref:
        idref={}
    # Model name = file name up to the first '-', without extension.
    model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
    #remove folder path from model
    head, model = os.path.split(model)
    # NOTE: shadows the builtin input(); Python 2 cStringIO.
    input = cStringIO.StringIO(csvcontent) #FIXME
    reader = csv.reader(input, quotechar='"', delimiter=',')
    # First row is the column header (Python 2 iterator protocol).
    fields = reader.next()
    fname_partial = ""
    if config.get('import_partial'):
        fname_partial = module + '/'+ fname
        if not os.path.isfile(config.get('import_partial')):
            # First run: create an empty checkpoint file.
            pickle.dump({}, file(config.get('import_partial'),'w+'))
        else:
            data = pickle.load(file(config.get('import_partial')))
            if fname_partial in data:
                if not data[fname_partial]:
                    # Zero rows remaining: this file was already imported.
                    return
                else:
                    # Skip the rows that were already processed.
                    for i in range(data[fname_partial]):
                        reader.next()
    if not (mode == 'init' or 'id' in fields):
        _logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
        return
    # Imports run as the superuser.
    uid = 1
    datas = []
    for line in reader:
        # Skip blank lines and lines with only empty cells.
        if not (line and any(line)):
            continue
        try:
            datas.append(map(misc.ustr, line))
        except:
            _logger.error("Cannot import the line: %s", line)
    registry = openerp.registry(cr.dbname)
    result, rows, warning_msg, dummy = registry[model].import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
    if result < 0:
        # Report failed import and abort module install
        raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
    if config.get('import_partial'):
        # Mark this file as fully imported in the checkpoint.
        data = pickle.load(file(config.get('import_partial')))
        data[fname_partial] = 0
        pickle.dump(data, file(config.get('import_partial'),'wb'))
        cr.commit()
#
# xml import/export
#
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
    """Validate an XML data file against import_xml.rng, then import it."""
    tree = etree.parse(xmlfile)
    schema_path = os.path.join(config['root_path'], 'import_xml.rng')
    relaxng = etree.RelaxNG(etree.parse(schema_path))
    try:
        relaxng.assert_(tree)
    except Exception:
        _logger.error('The XML file does not fit the required schema !')
        _logger.error(misc.ustr(relaxng.error_log.last_error))
        raise
    idref = idref if idref is not None else {}
    importer = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate)
    importer.parse(tree.getroot(), mode=mode)
    return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
james4424/nest-simulator | pynest/nest/tests/test_sp/test_sp_manager.py | 8 | 4250 | # -*- coding: utf-8 -*-
#
# test_sp_manager.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
from .utils import extract_dict_a_from_b
__author__ = 'naveau'
class TestStructuralPlasticityManager(unittest.TestCase):
    """Exercises the NEST structural-plasticity kernel configuration API."""

    def setUp(self):
        # Start every test from a clean kernel.
        nest.ResetKernel()
        nest.set_verbosity('M_INFO')
        # Synapse models skipped by these tests.
        self.exclude_synapse_model = [
            'stdp_dopamine_synapse',
            'stdp_dopamine_synapse_lbl',
            'stdp_dopamine_synapse_hpc',
            'stdp_dopamine_synapse_hpc_lbl',
            'gap_junction',
            'gap_junction_lbl'
        ]

    def test_register_synapses(self):
        # Registering each eligible synapse model must round-trip through
        # the kernel status dictionary unchanged.
        for syn_model in nest.Models('synapses'):
            if syn_model not in self.exclude_synapse_model:
                nest.ResetKernel()
                nest.SetDefaults(syn_model, {'delay': 0.5})
                syn_dict = {
                    'model': syn_model,
                    'pre_synaptic_element': 'SE1',
                    'post_synaptic_element': 'SE2'
                }
                nest.SetKernelStatus({
                    'min_delay': 0.1,
                    'max_delay': 1.0,
                    'structural_plasticity_synapses': {'syn1': syn_dict}
                })
                kernel_status = nest.GetKernelStatus(
                    'structural_plasticity_synapses')
                self.assertIn('syn1', kernel_status)
                # Compare only the keys present in the kernel's copy.
                self.assertEqual(kernel_status['syn1'], extract_dict_a_from_b(
                    kernel_status['syn1'], syn_dict))

    def test_min_max_delay_using_default_delay(self):
        # The default delay (1.0) must fall inside the kernel's
        # [min_delay, max_delay] interval after registration.
        nest.ResetKernel()
        delay = 1.0
        syn_model = 'static_synapse'
        nest.SetStructuralPlasticityStatus(
            {
                'structural_plasticity_synapses': {
                    'syn1': {
                        'model': syn_model,
                        'pre_synaptic_element': 'SE1',
                        'post_synaptic_element': 'SE2',
                    }
                }
            }
        )
        self.assertLessEqual(nest.GetKernelStatus('min_delay'), delay)
        self.assertGreaterEqual(nest.GetKernelStatus('max_delay'), delay)

    def test_synapse_creation(self):
        # Two neurons with 10 vacant elements each should end up fully
        # connected (10 * 2 directions = 20 connections).
        for syn_model in nest.Models('synapses'):
            if syn_model not in self.exclude_synapse_model:
                nest.ResetKernel()
                syn_dict = {
                    'model': syn_model,
                    'pre_synaptic_element': 'SE1',
                    'post_synaptic_element': 'SE2'
                }
                nest.SetStructuralPlasticityStatus({
                    'structural_plasticity_synapses': {'syn1': syn_dict}
                })
                neurons = nest.Create('iaf_neuron', 2, {
                    'synaptic_elements': {
                        'SE1': {'z': 10.0, 'growth_rate': 0.0},
                        'SE2': {'z': 10.0, 'growth_rate': 0.0}
                    }
                })
                nest.EnableStructuralPlasticity()
                nest.Simulate(10.0)
                status = nest.GetStatus(neurons, 'synaptic_elements')
                for st_neuron in status:
                    self.assertEqual(10, st_neuron['SE1']['z_connected'])
                    self.assertEqual(10, st_neuron['SE2']['z_connected'])
                self.assertEqual(
                    20, len(nest.GetConnections(neurons, neurons, syn_model)))
                # NOTE(review): this break stops after the first eligible
                # model -- presumably to bound runtime; confirm intent.
                break
def suite():
    """Collect all test_* methods of the manager tests into a TestSuite."""
    return unittest.makeSuite(TestStructuralPlasticityManager, 'test')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
tonybaloney/st2contrib | packs/networking_utils/actions/is_valid_ip.py | 4 | 2527 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
from st2actions.runners.pythonrunner import Action
class IsValidIpAction(Action):
    """StackStorm action that validates IP addresses via stdlib ``ipaddress``."""

    def run(self, ip_address, no_loopback=False, only_v4=False, only_v6=False):
        """Validate ``ip_address`` and report its properties.

        Args:
            ip_address: The IP address to validate.
            no_loopback: Raise an exception for Loopback addresses.
            only_v4: Raise an exception for IPv6 addresses.
            only_v6: Raise an exception for IPv4 addresses.

        Raises:
            ValueError: On invalid IP, loopback or when requesting only
                v4/v6 be considered valid.

        Returns:
            dict: With extra information about the IP address.
        """
        # ipaddress is a backport of the Python 3.3+ module and requires
        # unicode input, hence the explicit conversion (Python 2 code).
        addr = ipaddress.ip_address(unicode(ip_address))
        results = {
            'version': addr.version,
            'is_private': addr.is_private,
            'is_link_local': addr.is_link_local,
            'is_unspecified': addr.is_unspecified,
            'reverse_pointer': addr.reverse_pointer,
            'is_multicast': addr.is_multicast,
            'is_reserved': addr.is_reserved,
            'is_loopback': addr.is_loopback,
        }
        if only_v6 and addr.version == 4:
            raise ValueError("Valid IPv4 address, but IPv6 is required.")
        elif only_v4 and addr.version == 6:
            raise ValueError("Valid IPv6 address, but IPv4 is required.")
        if no_loopback and addr.is_loopback:
            raise ValueError("Address is a IPv{} loopback address".format(
                addr.version))
        return results
| apache-2.0 |
XDestination/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | 2 | 6559 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to serve as proxy for the target engine for testing.
Receives documents from the oplog worker threads and indexes them
into the backend.
Please look at the Solr and ElasticSearch doc manager classes for a sample
implementation with real systems.
"""
from threading import RLock
from mongo_connector import constants
from mongo_connector.errors import OperationFailed
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
from mongo_connector.compat import u
class DocumentStore(dict):
    """A dict whose item access and iteration are guarded by an RLock.

    The re-entrant lock lets the iterating thread still read/write items
    while it holds the lock during iteration.
    """

    def __init__(self):
        self._lock = RLock()

    def __getitem__(self, key):
        with self._lock:
            return super(DocumentStore, self).__getitem__(key)

    def __setitem__(self, key, value):
        with self._lock:
            return super(DocumentStore, self).__setitem__(key, value)

    def __iter__(self):
        def _locked_iter():
            # The lock is held for the whole life of the generator.
            with self._lock:
                for key in super(DocumentStore, self).__iter__():
                    yield key
        return _locked_iter()
class Entry(object):
    """One stored document with its namespace and oplog timestamp."""

    def __init__(self, doc, ns, ts):
        self.doc = doc
        self.ns = ns
        self.ts = ts
        # The document id is captured once; it survives tombstoning.
        self._id = doc['_id']

    @property
    def meta_dict(self):
        """Metadata-only view: id, namespace and timestamp."""
        return {'_id': self._id, 'ns': self.ns, '_ts': self.ts}

    @property
    def merged_dict(self):
        """The document merged with its metadata (metadata wins on clash)."""
        merged = dict(self.doc)
        merged.update(self.meta_dict)
        return merged

    def update(self, ns, ts):
        """Record a new namespace/timestamp for this entry."""
        self.ns = ns
        self.ts = ts
class DocManager(DocManagerBase):
    """BackendSimulator emulates both a target DocManager and a server.

    The DocManager class creates a connection to the backend engine and
    adds/removes documents, and in the case of rollback, searches for them.

    The reason for storing id/doc pairs as opposed to doc's is so that multiple
    updates to the same doc reflect the most up to date version as opposed to
    multiple, slightly different versions of a doc.
    """

    def __init__(self, url=None, unique_key='_id',
                 auto_commit_interval=None,
                 chunk_size=constants.DEFAULT_MAX_BULK, **kwargs):
        """Creates a dictionary to hold document id keys mapped to the
        documents as values.
        """
        self.unique_key = unique_key
        self.auto_commit_interval = auto_commit_interval
        self.doc_dict = DocumentStore()
        self.url = url
        self.chunk_size = chunk_size
        self.kwargs = kwargs

    def stop(self):
        """Stops any running threads in the DocManager.

        The simulator has no background threads, so this is a no-op.
        """
        pass

    def update(self, document_id, update_spec, namespace, timestamp):
        """Apply updates given in update_spec to the document whose id
        matches document_id, then re-upsert the result.

        Raises KeyError when document_id was never upserted.
        """
        document = self.doc_dict[document_id].doc
        updated = self.apply_update(document, update_spec)
        # Re-attach the id under unique_key, dropping any stale '_id'
        # copy left over from apply_update.
        if "_id" in updated:
            updated.pop("_id")
        updated[self.unique_key] = document_id
        self.upsert(updated, namespace, timestamp)
        return updated

    def upsert(self, doc, namespace, timestamp):
        """Adds a document to the doc dict."""
        # Allow exceptions to be triggered (for testing purposes)
        if doc.get('_upsert_exception'):
            raise Exception("upsert exception")
        doc_id = doc["_id"]
        self.doc_dict[doc_id] = Entry(doc=doc, ns=namespace, ts=timestamp)

    def insert_file(self, f, namespace, timestamp):
        """Inserts a file to the doc dict (its metadata plus raw content)."""
        doc = f.get_metadata()
        doc['content'] = f.read()
        self.doc_dict[f._id] = Entry(doc=doc, ns=namespace, ts=timestamp)

    def remove(self, document_id, namespace, timestamp):
        """Removes the document from the doc dict.

        The entry is kept as a tombstone (doc=None) so rollback searches
        still see the namespace/timestamp of the deletion.
        """
        try:
            entry = self.doc_dict[document_id]
            entry.doc = None
            entry.update(namespace, timestamp)
        except KeyError:
            raise OperationFailed("Document does not exist: %s"
                                  % u(document_id))

    def search(self, start_ts, end_ts):
        """Searches through all documents and finds all documents that were
        modified or deleted within the range.

        Since we have very few documents in the doc dict when this is called,
        linear search is fine. This method is only used by rollbacks to query
        all the documents in the target engine within a certain timestamp
        window. The input will be two longs (converted from Bson timestamp)
        which specify the time range. The start_ts refers to the timestamp
        of the last oplog entry after a rollback. The end_ts is the timestamp
        of the last document committed to the backend.
        """
        # NOTE(review): with 'or', every entry matches whenever
        # start_ts <= end_ts; kept as-is to preserve the simulator's
        # existing behaviour -- confirm before tightening to 'and'.
        for _id in self.doc_dict:
            entry = self.doc_dict[_id]
            if entry.ts <= end_ts or entry.ts >= start_ts:
                yield entry.meta_dict

    def commit(self):
        """Simply passes since we're not using an engine that needs commiting.
        """
        pass

    def get_last_doc(self):
        """Searches through the doc dict to find the document that was
        modified or deleted most recently.

        Raises ValueError when the store is empty (max of an empty sequence).
        """
        return max(self.doc_dict.values(), key=lambda x: x.ts).meta_dict

    def handle_command(self, command_doc, namespace, timestamp):
        """Database commands are ignored by the simulator."""
        pass

    def _search(self):
        """Returns all documents in the doc dict.

        This function is not a part of the DocManager API, and is only used
        to simulate searching all documents from a backend.
        """
        results = []
        for _id in self.doc_dict:
            entry = self.doc_dict[_id]
            if entry.doc is not None:
                results.append(entry.merged_dict)
        return results

    def _delete(self):
        """Deletes all documents.

        This function is not a part of the DocManager API, and is only used
        to simulate deleting all documents from a backend.
        """
        # Fix: reset to a fresh DocumentStore rather than a plain dict so
        # the store keeps its thread-safe behaviour after a wipe
        # (previously: self.doc_dict = {}).
        self.doc_dict = DocumentStore()
shakamunyi/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/gaussian_test.py | 5 | 5286 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
class GaussianTest(tf.test.TestCase):
    """Tests for tf.contrib.distributions.Gaussian (TF 0.x-era API).

    Each test compares graph-evaluated results against NumPy references.
    """

    def testGaussianLogPDF(self):
        # log pdf against the closed-form normal density.
        with tf.Session():
            batch_size = 6
            mu = tf.constant([3.0] * batch_size)
            sigma = tf.constant([math.sqrt(10.0)] * batch_size)
            mu_v = 3.0
            sigma_v = np.sqrt(10.0)
            x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
            gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
            expected_log_pdf = np.log(
                1 / np.sqrt(2 * np.pi) / sigma_v
                * np.exp(-1.0 / (2 * sigma_v**2) * (x - mu_v)**2))
            log_pdf = gaussian.log_pdf(x)
            self.assertAllClose(expected_log_pdf, log_pdf.eval())
            pdf = gaussian.pdf(x)
            self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())

    def testGaussianLogPDFMultidimensional(self):
        # Same check with a (6, 2) batch: two parameter columns broadcast
        # against a column vector of sample points.
        with tf.Session():
            batch_size = 6
            mu = tf.constant([[3.0, -3.0]] * batch_size)
            sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
            mu_v = np.array([3.0, -3.0])
            sigma_v = np.array([np.sqrt(10.0), np.sqrt(15.0)])
            x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
            gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
            expected_log_pdf = np.log(
                1 / np.sqrt(2 * np.pi) / sigma_v
                * np.exp(-1.0 / (2 * sigma_v**2) * (x - mu_v)**2))
            log_pdf = gaussian.log_pdf(x)
            log_pdf_values = log_pdf.eval()
            self.assertEqual(log_pdf.get_shape(), (6, 2))
            self.assertAllClose(expected_log_pdf, log_pdf_values)
            pdf = gaussian.pdf(x)
            pdf_values = pdf.eval()
            self.assertEqual(pdf.get_shape(), (6, 2))
            self.assertAllClose(np.exp(expected_log_pdf), pdf_values)

    def testGaussianCDF(self):
        with tf.Session():
            batch_size = 6
            mu = tf.constant([3.0] * batch_size)
            sigma = tf.constant([math.sqrt(10.0)] * batch_size)
            mu_v = 3.0
            sigma_v = np.sqrt(10.0)
            x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
            gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
            erf_fn = np.vectorize(math.erf)
            # From Wikipedia
            expected_cdf = 0.5 * (1.0 + erf_fn((x - mu_v)/(sigma_v*np.sqrt(2))))
            cdf = gaussian.cdf(x)
            self.assertAllClose(expected_cdf, cdf.eval())

    def testGaussianEntropy(self):
        # Differential entropy 0.5*log(2*pi*e*sigma^2); mu_v is all ones so
        # mu_v * sigma_v only broadcasts sigma_v to the batch shape.
        with tf.Session():
            mu_v = np.array([1.0, 1.0, 1.0])
            sigma_v = np.array([[1.0, 2.0, 3.0]]).T
            gaussian = tf.contrib.distributions.Gaussian(mu=mu_v, sigma=sigma_v)
            sigma_broadcast = mu_v * sigma_v
            expected_entropy = 0.5 * np.log(2*np.pi*np.exp(1)*sigma_broadcast**2)
            self.assertAllClose(expected_entropy, gaussian.entropy().eval())

    def testGaussianSample(self):
        # Sampling: empirical mean/std of 100k draws must be close to the
        # distribution parameters (loose tolerances for sampling noise).
        with tf.Session():
            mu = tf.constant(3.0)
            sigma = tf.constant(math.sqrt(10.0))
            mu_v = 3.0
            sigma_v = np.sqrt(10.0)
            n = tf.constant(100000)
            gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
            samples = gaussian.sample(n, seed=137)
            sample_values = samples.eval()
            self.assertEqual(sample_values.shape, (100000,))
            self.assertAllClose(sample_values.mean(), mu_v, atol=1e-2)
            self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)

    def testGaussianSampleMultiDimensional(self):
        with tf.Session():
            batch_size = 2
            mu = tf.constant([[3.0, -3.0]] * batch_size)
            sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
            mu_v = [3.0, -3.0]
            sigma_v = [np.sqrt(10.0), np.sqrt(15.0)]
            n = tf.constant(100000)
            gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
            samples = gaussian.sample(n, seed=137)
            sample_values = samples.eval()
            self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
            self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-2)
            self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
            self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-2)
            self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)

    def testNegativeSigmaFails(self):
        # Parameter validation is deferred to graph execution time.
        with tf.Session():
            gaussian = tf.contrib.distributions.Gaussian(
                mu=[1.],
                sigma=[-5.],
                name='G')
            with self.assertRaisesOpError(
                    r'should contain only positive values'):
                gaussian.mean.eval()
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
digimarc/django | django/contrib/admin/migrations/0001_initial.py | 142 | 1657 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial django.contrib.admin migration: creates the LogEntry model."""

    dependencies = [
        # LogEntry.user points at whichever user model the project swaps in.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='LogEntry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
                # TextField: object ids may be non-integer (e.g. UUIDs).
                ('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
                ('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
                ('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
                ('change_message', models.TextField(verbose_name='change message', blank=True)),
                ('content_type', models.ForeignKey(to_field='id', blank=True, to='contenttypes.ContentType', null=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-action_time',),
                'db_table': 'django_admin_log',
                'verbose_name': 'log entry',
                'verbose_name_plural': 'log entries',
            },
            bases=(models.Model,),
            managers=[
                ('objects', django.contrib.admin.models.LogEntryManager()),
            ],
        ),
    ]
| bsd-3-clause |
KevinFasusi/supplychainpy | supplychainpy/_helpers/_config_file_paths.py | 1 | 2886 | # Copyright (c) 2015-2016, The Authors and Contributors
# <see AUTHORS file>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from supplychainpy._helpers._pickle_config import deserialise_config, serialise_config
APP_DIR = os.path.dirname(__file__, )
REL_PATH_GENETIC_ALGORITHM = '../sample_data/population_genome.txt'
REL_PATH_DASH = 'dash.pickle'
REL_PATH_ARCHIVE = '../../_archive/'
REL_PATH_CSV_MANAGEMENT_CONFIG = '../_pickled/csv_management_config.pickle'
REL_PATH_APPLICATION_CONFIG = '../_pickled/application_config.pickle'
REL_PATH_PICKLE = '../_pickled/'
ABS_FILE_PATH_DASH = os.path.abspath(os.path.join(APP_DIR, '../_pickled/', REL_PATH_DASH))
ABS_FILE_PATH_APPLICATION_CONFIG = os.path.abspath(os.path.join(APP_DIR, '../_pickled/', REL_PATH_APPLICATION_CONFIG))
ABS_FILE_PATH_CSV_MANAGEMENT_CONFIG = os.path.abspath(os.path.join(APP_DIR, REL_PATH_CSV_MANAGEMENT_CONFIG))
ABS_FILE_PATH_ARCHIVE = os.path.abspath(os.path.join(APP_DIR, REL_PATH_ARCHIVE))
ABS_FILE_GENETIC_ALGORITHM = os.path.abspath(os.path.join(APP_DIR, REL_PATH_ARCHIVE))
ABS_FILE_PICKLE = os.path.abspath(os.path.join(APP_DIR, REL_PATH_PICKLE))
def main():
    """Ad-hoc smoke check: print the resolved pickle directory path.

    (Removed leftover commented-out scratch code that mutated the
    application config pickle.)
    """
    print(ABS_FILE_PICKLE)
if __name__ == '__main__':
main()
| bsd-3-clause |
manasi24/tempest | tempest/api_schema/response/compute/v2_1/services.py | 23 | 2372 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schema validating the response of GET /os-services (list services).
list_services = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'services': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        # String ids must look like 'host@number'.
                        'id': {'type': ['integer', 'string'],
                               'pattern': '^[a-zA-Z!]*@[0-9]+$'},
                        'zone': {'type': 'string'},
                        'host': {'type': 'string'},
                        'state': {'type': 'string'},
                        'binary': {'type': 'string'},
                        'status': {'type': 'string'},
                        'updated_at': {'type': ['string', 'null']},
                        'disabled_reason': {'type': ['string', 'null']}
                    },
                    'additionalProperties': False,
                    'required': ['id', 'zone', 'host', 'state', 'binary',
                                 'status', 'updated_at', 'disabled_reason']
                }
            }
        },
        'additionalProperties': False,
        'required': ['services']
    }
}

# JSON schema validating the response of enabling/disabling a service.
enable_service = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'service': {
                'type': 'object',
                'properties': {
                    'status': {'type': 'string'},
                    'binary': {'type': 'string'},
                    'host': {'type': 'string'}
                },
                'additionalProperties': False,
                'required': ['status', 'binary', 'host']
            }
        },
        'additionalProperties': False,
        'required': ['service']
    }
}
| apache-2.0 |
cryptofun/honey | share/seeds/generate-seeds.py | 1 | 4185 | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix (::ffff:a.b.c.d mapped form)
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix (OnionCat fd87:d87e:eb43::/48)
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Accepts: '<x>.onion' (base32-decoded onto the OnionCat prefix),
    dotted IPv4, colon IPv6, or '0xDDCCBBAA' little-endian IPv4.
    Raises ValueError for anything else or for a malformed onion name.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # Fix: was '% s' -- 's' is undefined here, raising NameError
            # instead of the intended ValueError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into (16-byte host, port).

    Handles '[ipv6]:port', 'host:port' and bare hosts; a missing port
    falls back to defaultport.
    """
    # Fix: raw string -- '\[' in a plain string is an invalid escape
    # sequence (an error in modern Python).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Write one C SeedSpec6 array named structname to g, one entry per
    address line read from f (comments after '#' and blank lines skipped).
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    entries = []
    for line in f:
        comment = line.find('#')
        if comment != -1:
            line = line[0:comment]
        line = line.strip()
        if not line:
            continue
        host, port = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        entries.append(' {{%s}, %i}' % (hoststr, port))
    # Entries are comma-newline separated, matching the original output.
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Generate chainparamsseeds.h content on stdout from the
    nodes_main.txt / nodes_test.txt files in the directory argv[1].
    """
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    # Header-guard and provenance banner for the generated C header.
    g.write('#ifndef H_CHAINPARAMSSEEDS\n')
    g.write('#define H_CHAINPARAMSSEEDS\n')
    g.write('// List of fixed seed nodes for the honey network\n')
    g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
    g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
    g.write('// IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    # Mainnet seeds use port 15714, testnet 25714.
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 15714)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 25714)
    g.write('#endif\n')
| mit |
githubutilities/LeetCode | Python/invert-binary-tree.py | 3 | 2242 | # Time: O(n)
# Space: O(h)
#
# Invert a binary tree.
#
# 4
# / \
# 2 7
# / \ / \
# 1 3 6 9
# to
# 4
# / \
# 7 2
# / \ / \
# 9 6 3 1
#
# Time: O(n)
# Space: O(w), w is the max number of the nodes of the levels.
# BFS solution.
class Queue:
    """Minimal FIFO queue backed by collections.deque.

    push/pop/peek/size/empty all run in O(1).
    """

    def __init__(self):
        # Fix: this file never imports 'collections' at module level, so
        # the original raised NameError here; import locally instead.
        import collections
        self.data = collections.deque()

    def push(self, x):
        self.data.append(x)

    def peek(self):
        # IndexError on an empty queue, like the underlying deque.
        return self.data[0]

    def pop(self):
        return self.data.popleft()

    def size(self):
        return len(self.data)

    def empty(self):
        return len(self.data) == 0
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """BFS inversion using the file's Queue helper (O(n) time, O(w) space)."""

    # @param {TreeNode} root
    # @return {TreeNode}
    def invertTree(self, root):
        """Swap left/right children of every node reachable from root."""
        if root is None:
            return root
        pending = Queue()
        pending.push(root)
        while not pending.empty():
            node = pending.pop()
            node.left, node.right = node.right, node.left
            for child in (node.left, node.right):
                if child is not None:
                    pending.push(child)
        return root
# Time: O(n)
# Space: O(h)
# Stack solution.
class Solution2:
# @param {TreeNode} root
# @return {TreeNode}
def invertTree(self, root):
if root is not None:
nodes = []
nodes.append(root)
while nodes:
node = nodes.pop()
node.left, node.right = node.right, node.left
if node.left is not None:
nodes.append(node.left)
if node.right is not None:
nodes.append(node.right)
return root
# Time: O(n)
# Space: O(h)
# DFS, Recursive solution.
class Solution3:
# @param {TreeNode} root
# @return {TreeNode}
def invertTree(self, root):
if root is not None:
root.left, root.right = self.invertTree(root.right), \
self.invertTree(root.left)
return root
| mit |
RachellCalhoun/craftsite | crafts/migrations/0001_initial.py | 1 | 1079 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Initial crafts-app migration: creates the CraftPost model."""

    dependencies = [
        # CraftPost.author points at the project's (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='CraftPost',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField(null=True, blank=True)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                # Unset until the post is published.
                ('published_date', models.DateTimeField(null=True, blank=True)),
                ('photo', models.ImageField(null=True, upload_to='', blank=True)),
                ('link', models.URLField(null=True, blank=True)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| gpl-3.0 |
pombreda/formalchemy | formalchemy/tests/__init__.py | 2 | 13869 | # -*- coding: utf-8 -*-
import os
import glob
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
from BeautifulSoup import BeautifulSoup # required for html prettification
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
logging.getLogger('sqlalchemy').setLevel(logging.ERROR)
from formalchemy.fields import Field, SelectFieldRenderer, FieldRenderer, TextFieldRenderer, EscapingReadonlyRenderer
import formalchemy.fatypes as types
def ls(*args):
    """Print a sorted directory listing relative to this test package.

    Directories are prefixed with 'D', files with '-'.  Used by the
    doctest suite.  (Python 2 print statements; this module is Py2.)
    """
    dirname = os.path.dirname(__file__)
    args = list(args)
    args.append('*')
    files = glob.glob(os.path.join(dirname, *args))
    files.sort()
    for f in files:
        if os.path.isdir(f):
            print 'D %s' % os.path.basename(f)
        else:
            print '- %s' % os.path.basename(f)
def cat(*args):
    """Print the contents of a file relative to this test package."""
    filename = os.path.join(os.path.dirname(__file__), *args)
    print open(filename).read()
# Shared in-memory SQLite database for the whole test suite.
engine = create_engine('sqlite://')
Session = scoped_session(sessionmaker(autoflush=False, bind=engine))
# Declarative base wired to Session.mapper so new instances are
# automatically added to the session (legacy SQLAlchemy extension).
Base = declarative_base(engine, mapper=Session.mapper)
class One(Base):
    """Minimal model: a primary key and nothing else."""
    __tablename__ = 'ones'
    id = Column(Integer, primary_key=True)
class Two(Base):
    """Model with a nullable integer column that has a server default."""
    __tablename__ = 'twos'
    id = Column(Integer, primary_key=True)
    foo = Column(Integer, default='133', nullable=True)
class TwoInterval(Base):
    """Model exercising the Interval column type."""
    __tablename__ = 'two_interval'
    id = Column(Integer, primary_key=True)
    foo = Column(Interval, nullable=False)
class TwoFloat(Base):
    """Model exercising the Float column type."""
    __tablename__ = 'two_floats'
    id = Column(Integer, primary_key=True)
    foo = Column(Float, nullable=False)
from decimal import Decimal
class TwoNumeric(Base):
    """Model exercising the Numeric (Decimal) column type."""
    __tablename__ = 'two_numerics'
    id = Column(Integer, primary_key=True)
    foo = Column(Numeric, nullable=True)
class Three(Base):
    """Model with two optional text columns."""
    __tablename__ = 'threes'
    id = Column(Integer, primary_key=True)
    foo = Column(Text, nullable=True)
    bar = Column(Text, nullable=True)
class CheckBox(Base):
    """Model exercising a required Boolean (checkbox) field."""
    __tablename__ = 'checkboxes'
    id = Column(Integer, primary_key=True)
    field = Column(Boolean, nullable=False)
class PrimaryKeys(Base):
    """Model with a composite (integer + string) primary key."""
    __tablename__ = 'primary_keys'
    id = Column(Integer, primary_key=True)
    id2 = Column(String(10), primary_key=True)
    field = Column(String(10), nullable=False)
class Binaries(Base):
    """Model exercising the LargeBinary (file upload) column type."""
    __tablename__ = 'binaries'
    id = Column(Integer, primary_key=True)
    file = Column(LargeBinary, nullable=True)
class ConflictNames(Base):
    """Model whose column names ('model', 'data', 'session') collide with
    identifiers FormAlchemy uses internally."""
    __tablename__ = 'conflict_names'
    id = Column(Integer, primary_key=True)
    model = Column(String, nullable=True)
    data = Column(String, nullable=True)
    session = Column(String, nullable=True)
# Classical (non-declarative) table backing the Vertex composite test:
# two coordinate pairs mapped to two Point composites below.
vertices = Table('vertices', Base.metadata,
    Column('id', Integer, primary_key=True),
    Column('x1', Integer),
    Column('y1', Integer),
    Column('x2', Integer),
    Column('y2', Integer),
    )
class Point(object):
    """Plain 2-D value object used as a SQLAlchemy composite column type."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __composite_values__(self):
        # SQLAlchemy calls this to flatten the composite back into its
        # underlying columns.
        return [self.x, self.y]

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not self.__eq__(other)
class Vertex(object):
    """Empty class mapped classically onto the 'vertices' table; gains
    'start' and 'end' Point composites via Session.mapper below."""
    pass
# Map Vertex classically, exposing the two coordinate pairs as Point
# composite properties.
Session.mapper(Vertex, vertices, properties={
    'start':composite(Point, vertices.c.x1, vertices.c.y1),
    'end':composite(Point, vertices.c.x2, vertices.c.y2)
    })
class PointFieldRenderer(FieldRenderer):
    """Render/deserialize a composite Point as two text inputs (-x, -y)."""
    def render(self, **kwargs):
        from formalchemy import helpers as h
        data = self.field.parent.data
        x_name = self.name + '-x'
        y_name = self.name + '-y'
        # Prefer the submitted form value; otherwise fall back to the
        # bound Point's coordinate (empty string when unset).  Uses the
        # pre-2.5 'and/or' conditional idiom.
        x_value = (data is not None and x_name in data) and data[x_name] or str(self.field.value and self.field.value.x or '')
        y_value = (data is not None and y_name in data) and data[y_name] or str(self.field.value and self.field.value.y or '')
        return h.text_field(x_name, value=x_value) + h.text_field(y_name, value=y_value)
    def deserialize(self):
        # Rebuild a Point from the two submitted coordinate fields.
        data = self.field.parent.data.getone(self.name + '-x'), self.field.parent.data.getone(self.name + '-y')
        return Point(*[int(i) for i in data])
# todo? test a CustomBoolean, using a TypeDecorator --
# http://www.sqlalchemy.org/docs/04/types.html#types_custom
# probably need to add _renderer attr and check
# isinstance(getattr(myclass, '_renderer', type(myclass)), Boolean)
# since the custom class shouldn't really inherit from Boolean
# Table backing the Property model's column_property test.
properties = Table('properties', Base.metadata,
    Column('id', Integer, primary_key=True),
    Column('a', Integer))
class Property(Base):
    """Model exposing a relabelled column via column_property()."""
    __table__ = properties
    foo = column_property(properties.c.a.label('foo'))
    # bar = column_property(properties.c.a) # TODO
class Recursive(Base):
    """Self-referential model: each row may point at a parent row."""
    __tablename__ = 'recursives'
    id = Column(Integer, primary_key=True)
    foo = Column(Text, nullable=True)
    parent_id = Column(Integer, ForeignKey("recursives.id"))
    parent = relation('Recursive', primaryjoin=parent_id==id, uselist=False, remote_side=parent_id)
class Synonym(Base):
    """Model exercising a synonym with a custom property descriptor:
    assigning to .foo prefixes the stored value with 'SOMEFOO '."""
    __tablename__ = 'synonyms'
    id = Column(Integer, primary_key=True)
    _foo = Column(Text, nullable=True)
    def _set_foo(self, foo):
        self._foo = "SOMEFOO " + foo
    def _get_foo(self):
        return self._foo
    foo = synonym('_foo', descriptor=property(_get_foo, _set_foo))
class OTOChild(Base):
    """Child side of the one-to-one relationship test."""
    __tablename__ = 'one_to_one_child'
    id = Column(Integer, primary_key=True)
    baz = Column(Text, nullable=False)
    def __unicode__(self):
        return self.baz
    def __repr__(self):
        return '<OTOChild %s>' % self.baz
class OTOParent(Base):
    """Parent side of the one-to-one relationship test (required child)."""
    __tablename__ = 'one_to_one_parent'
    id = Column(Integer, primary_key=True)
    oto_child_id = Column(Integer, ForeignKey('one_to_one_child.id'), nullable=False)
    child = relation(OTOChild, uselist=False)
class Order(Base):
    """Order with a required owning user (many-to-one)."""
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    quantity = Column(Integer, nullable=False)
    def __unicode__(self):
        return 'Quantity: %s' % self.quantity
    def __repr__(self):
        return '<Order for user %s: %s>' % (self.user_id, self.quantity)
class OptionalOrder(Base): # the user is optional, not the order
    """Like Order, but the owning user is nullable."""
    __tablename__ = 'optional_orders'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    quantity = Column(Integer)
    user = relation('User')
    def __unicode__(self):
        return 'Quantity: %s' % self.quantity
    def __repr__(self):
        return '<OptionalOrder for user %s: %s>' % (self.user_id, self.quantity)
class User(Base):
    """User with both an eager relation and a dynamic loader to orders;
    also defines __html__ so renderers can emit a mailto link."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    email = Column(Unicode(40), unique=True, nullable=False)
    password = Column(Unicode(20), nullable=False)
    name = Column(Unicode(30))
    orders = relation(Order, backref='user', order_by='Order.quantity')
    orders_dl = dynamic_loader(Order)
    def __unicode__(self):
        return self.name
    def __repr__(self):
        return '<User %s>' % self.name
    def __html__(self):
        return '<a href="mailto:%s">%s</a>' % (self.email, self.name)
class NaturalOrder(Base):
    """Order referencing its user by natural key (email) instead of id."""
    __tablename__ = 'natural_orders'
    id = Column(Integer, primary_key=True)
    user_email = Column(String, ForeignKey('natural_users.email'), nullable=False)
    quantity = Column(Integer, nullable=False)
    def __repr__(self):
        return 'Quantity: %s' % self.quantity
class NaturalUser(Base):
    """User keyed by its email address (natural primary key)."""
    __tablename__ = 'natural_users'
    email = Column(Unicode(40), primary_key=True)
    password = Column(Unicode(20), nullable=False)
    name = Column(Unicode(30))
    orders = relation(NaturalOrder, backref='user')
    def __repr__(self):
        return self.name
class Function(Base):
    """Model whose primary key defaults to a SQL function result."""
    __tablename__ = 'functions'
    foo = Column(TIMESTAMP, primary_key=True, default=func.current_timestamp())
# test property order for non-declarative mapper
# Classical tables for the Address/User2 mapping below.
addresses = Table('email_addresses', Base.metadata,
    Column('address_id', Integer, Sequence('address_id_seq', optional=True), primary_key = True),
    Column('address', String(40)),
    )
users2 = Table('users2', Base.metadata,
    Column('user_id', Integer, Sequence('user_id_seq', optional=True), primary_key = True),
    Column('address_id', Integer, ForeignKey(addresses.c.address_id)),
    Column('name', String(40), nullable=False)
    )
class Address(object): pass  # classical-mapping target for 'email_addresses'
class User2(object): pass  # classical-mapping target for 'users2'
# Classical (non-declarative) mappers for the property-order test.
mapper(Address, addresses)
mapper(User2, users2, properties={'address': relation(Address)})
class Manual(object):
    """Non-mapped class built entirely from manual FormAlchemy Fields."""
    a = Field()
    b = Field(type=types.Integer).dropdown([('one', 1), ('two', 2)], multiple=True)
    d = Field().textarea((80, 10))
class OrderUser(Base):
    """Association model with a composite foreign primary key."""
    __tablename__ = 'order_users'
    user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
    order_id = Column(Integer, ForeignKey('orders.id'), primary_key=True)
    user = relation(User)
    order = relation(Order)
    def __repr__(self):
        return 'OrderUser(%s, %s)' % (self.user_id, self.order_id)
class OrderUserTag(Base):
    """Model with a composite ForeignKeyConstraint onto OrderUser."""
    __table__ = Table('order_user_tags', Base.metadata,
        Column('id', Integer, primary_key=True),
        Column('user_id', Integer, nullable=False),
        Column('order_id', Integer, nullable=False),
        Column('tag', String, nullable=False),
        ForeignKeyConstraint(['user_id', 'order_id'], ['order_users.user_id', 'order_users.order_id']))
    order_user = relation(OrderUser)
class Order__User(Base):
    """Model mapped onto an aliased join of the orders and users tables."""
    __table__ = join(Order.__table__, User.__table__).alias('__orders__users')
class Aliases(Base):
    """Model whose attribute name differs from its column name."""
    __tablename__ = 'table_with_aliases'
    id = Column(Integer, primary_key=True)
    text = Column('row_text', Text)
# Create the schema and populate the shared fixture data used by the
# doctests (two users with orders, natural-key variants, composite-key
# rows, and one row exercising the conflicting column names).
Base.metadata.create_all()
session = Session()
primary1 = PrimaryKeys(id=1, id2='22', field='value1')
primary2 = PrimaryKeys(id=1, id2='33', field='value2')
parent = OTOParent()
parent.child = OTOChild(baz='baz')
bill = User(email='[email protected]',
            password='1234',
            name='Bill')
john = User(email='[email protected]',
            password='5678',
            name='John')
order1 = Order(user=bill, quantity=10)
order2 = Order(user=john, quantity=5)
order3 = Order(user=john, quantity=6)
nbill = NaturalUser(email='[email protected]',
                    password='1234',
                    name='Natural Bill')
njohn = NaturalUser(email='[email protected]',
                    password='5678',
                    name='Natural John')
norder1 = NaturalOrder(user=nbill, quantity=10)
norder2 = NaturalOrder(user=njohn, quantity=5)
orderuser1 = OrderUser(user_id=1, order_id=1)
orderuser2 = OrderUser(user_id=1, order_id=2)
conflict_names = ConflictNames(data='data', model='model', session='session')
session.commit()
from formalchemy import config
from formalchemy.forms import FieldSet as DefaultFieldSet
from formalchemy.tables import Grid as DefaultGrid
from formalchemy.fields import Field
from formalchemy import templates
from formalchemy.validators import ValidationError
# The engine-comparison tests below assume Mako is installed and is the
# configured default template engine; fail fast otherwise.
if templates.HAS_MAKO:
    if not isinstance(config.engine, templates.MakoEngine):
        raise ValueError('MakoEngine is not the default engine: %s' % config.engine)
else:
    raise ImportError('mako is required for testing')
def pretty_html(html):
    """Normalise *html* for comparison across template engines.

    Unicode input is utf-8 encoded first (Python 2), then the markup is
    prettified with BeautifulSoup and stripped of surrounding whitespace.
    """
    if isinstance(html, unicode):
        html = html.encode('utf-8')
    soup = BeautifulSoup(str(html))
    return soup.prettify().strip()
class FieldSet(DefaultFieldSet):
    """FieldSet that cross-checks rendering across template engines.

    render() produces the default engine's (prettified) HTML and asserts
    that every other registered engine renders identical output.
    """

    def render(self, lang=None):
        # The readonly and editable branches were previously duplicated
        # verbatim; only the template name differs, so select it up
        # front.  ('and/or' keeps pre-2.5 compatibility, matching the
        # sorted() shim below.)
        template = self.readonly and 'fieldset_readonly' or 'fieldset'
        html = pretty_html(DefaultFieldSet.render(self))
        for name, engine in templates.engines.items():
            if isinstance(engine, config.engine.__class__):
                # The default engine already produced the reference HTML.
                continue
            html_engine = pretty_html(engine(template, fieldset=self))
            assert html == html_engine, (name, html, html_engine)
        return html
class Grid(DefaultGrid):
    """Grid that cross-checks rendering across template engines.

    render() produces the default engine's (prettified) HTML and asserts
    that every other registered engine renders identical output.
    """

    def render(self, lang=None):
        # Deduplicated: the two branches differed only in template name.
        template = self.readonly and 'grid_readonly' or 'grid'
        html = pretty_html(DefaultGrid.render(self))
        for name, engine in templates.engines.items():
            if isinstance(engine, config.engine.__class__):
                # The default engine already produced the reference HTML.
                continue
            html_engine = pretty_html(engine(template, collection=self))
            assert html == html_engine, (name, html, html_engine)
        return html
# Snapshot of the default renderers so tests can restore them after
# swapping in custom ones.
original_renderers = FieldSet.default_renderers.copy()
def configure_and_render(fs, **options):
    """Apply *options* via fs.configure() and return fs.render()."""
    fs.configure(**options)
    return fs.render()
if not hasattr(__builtins__, 'sorted'):
    # 2.3 support
    # Fallback for Python < 2.4, which lacks the sorted() builtin.
    # NOTE(review): __builtins__ is a dict rather than a module when this
    # package is imported (not run as __main__), so hasattr() may not
    # detect the builtin there - confirm before relying on this shim.
    def sorted(L, key=lambda a: a):
        L = list(L)
        L.sort(lambda a, b: cmp(key(a), key(b)))
        return L
class ImgRenderer(TextFieldRenderer):
    """Renderer that emits the field value as an <img> tag's src."""
    def render(self, *args, **kwargs):
        return '<img src="%s">' % self.value
# Expose a ready-made FieldSet through a fake 'library' module so the
# doctests can import it as if it were an external dependency.
import fake_module
fake_module.__dict__.update({
    'fs': FieldSet(User),
    })
import sys
sys.modules['library'] = fake_module
| mit |
da4089/simplefix | simplefix/parser.py | 1 | 8483 | #! /usr/bin/env python
########################################################################
# SimpleFIX
# Copyright (C) 2016-2020, David Arnold.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
########################################################################
from .constants import EQUALS_BYTE, SOH_BYTE
from .message import FixMessage, fix_val
from .data import RAW_DATA_TAGS, RAW_LEN_TAGS
# By default, messages are terminated by the Checksum (10) tag.
DEFAULT_STOP_TAG = 10
class FixParser(object):  # skipcq: PYL-R0205
    """FIX protocol message parser.
    This class translates FIX application messages in raw (wire)
    format into instances of the FixMessage class.
    It does not perform any validation of the fields, their presence
    or absence in a particular message, the data types of fields, or
    the values of enumerations.
    It is suitable for streaming processing, accumulating byte data
    from a network connection, and returning complete messages as they
    are delivered, potentially in multiple fragments."""
    def __init__(self):
        """Constructor."""
        # Internal buffer used to accumulate message data.
        self.buf = b""
        # Parsed "tag=value" pairs, removed from the buffer, but not
        # yet returned as a message.
        self.pairs = []
        # Copy raw field length tags.
        self.raw_len_tags = RAW_LEN_TAGS[:]
        # Copy raw field data tags.
        self.raw_data_tags = RAW_DATA_TAGS[:]
        # Parsed length of data field.
        self.raw_len = 0
        # Stop tag (default).
        self.stop_tag = DEFAULT_STOP_TAG
        # Stop character (optional).
        self.stop_char = None
    def add_raw(self, length_tag, value_tag):
        """Define the tags used for a private raw data field.
        :param length_tag: tag number of length field.
        :param value_tag: tag number of value field.
        Data fields are not terminated by the SOH character as is usual for
        FIX, but instead have a second, preceding field that specifies the
        length of the value in bytes.  The parser is initialised with all the
        data fields defined in FIX.5.0, but if your application uses private
        data fields, you can add them here, and the parser will process them
        correctly. """
        self.raw_len_tags.append(length_tag)
        self.raw_data_tags.append(value_tag)
    def remove_raw(self, length_tag, value_tag):
        """Remove the tags for a data type field.
        :param length_tag: tag number of the length field.
        :param value_tag: tag number of the value field.
        You can remove either private or standard data field definitions in
        case a particular application uses them for a field of a different
        type. """
        self.raw_len_tags.remove(length_tag)
        self.raw_data_tags.remove(value_tag)
    def reset(self):
        """Reset the internal parser state.
        This will discard any appended buffer content, and any fields
        parsed so far."""
        self.buf = b""
        self.pairs = []
        self.raw_len = 0
    def set_message_terminator(self, tag=None, char=None):
        """Set the end-of-message detection scheme.
        :param tag: FIX tag number of terminating field.  Default is 10.
        :param char: Alternative, terminating character.
        By default, messages are terminated by the FIX Checksum (10)
        field.  This can be changed to use a different tag, or a reserved
        character using this function.
        Note that only one of 'tag' or 'char' should be set, using a
        named parameter."""
        if tag is not None and char is not None:
            raise ValueError("Only supply one of 'tag' or 'char'.")
        if tag is not None:
            self.stop_tag = tag
            self.stop_char = None
        else:
            self.stop_tag = None
            # Accept either a str or bytes terminator; store its first
            # byte value for comparison against buffer bytes.
            bs = char.encode() if type(char) is str else char
            self.stop_char = bs[0]
    def append_buffer(self, buf):
        """Append a byte string to the parser buffer.
        :param buf: byte string to append.
        The parser maintains an internal buffer of bytes to be parsed.
        As raw data is read, it can be appended to this buffer.  Each
        call to get_message() will try to remove the bytes of a
        complete messages from the head of the buffer."""
        self.buf += fix_val(buf)
    def get_buffer(self):
        """Return a reference to the internal buffer."""
        return self.buf
    def get_message(self):
        """Process the accumulated buffer and return the first message.
        If the buffer starts with FIX fields other than BeginString
        (8), these are discarded until the start of a message is
        found.
        If no BeginString (8) field is found, this function returns
        None.  Similarly, if (after a BeginString) no Checksum (10)
        field is found, the function returns None.
        Otherwise, it returns a simplefix.FixMessage instance
        initialised with the fields from the first complete message
        found in the buffer."""
        # Break buffer into tag=value pairs.
        start = 0
        point = 0
        in_tag = True
        tag = 0
        while point < len(self.buf):
            c = self.buf[point]
            if in_tag and c == EQUALS_BYTE:
                # End of the tag part: parse the tag number.
                tag_string = self.buf[start:point]
                point += 1
                tag = int(tag_string)
                if tag in self.raw_data_tags and self.raw_len > 0:
                    # Raw data value: consume exactly raw_len bytes
                    # (the value may legally contain SOH bytes).
                    if self.raw_len > len(self.buf) - point:
                        # Value not yet fully buffered; wait for more data.
                        break
                    value = self.buf[point:point + self.raw_len]
                    self.pairs.append((tag, value))
                    self.buf = self.buf[point + self.raw_len + 1:]
                    point = 0
                    self.raw_len = 0
                    start = point
                else:
                    in_tag = False
                    start = point
            elif c == self.stop_char:
                # Custom terminator character: flush any partial pair
                # and stop scanning.
                if start != point:
                    value = self.buf[start:point]
                    self.pairs.append((tag, value))
                    self.buf = self.buf[point + 1:]
                else:
                    self.buf = self.buf[1:]
                break
            elif c == SOH_BYTE:
                # Standard field delimiter: record the completed pair.
                value = self.buf[start:point]
                self.pairs.append((tag, value))
                self.buf = self.buf[point + 1:]
                if tag == self.stop_tag:
                    break
                start = 0
                point = -1
                in_tag = True
                if tag in self.raw_len_tags:
                    # Remember the length for the raw value field that
                    # must follow.
                    self.raw_len = int(value)
            point += 1
        # Check first pair is FIX BeginString.
        while self.pairs and self.pairs[0][0] != 8:
            # Discard pairs until we find the beginning of a message.
            self.pairs.pop(0)
        if len(self.pairs) == 0:
            return None
        # Look for checksum.
        if self.stop_tag is not None:
            if self.pairs[-1][0] != self.stop_tag:
                return None
        # Found checksum, so we have a complete message.
        m = FixMessage()
        for tag, value in self.pairs:
            m.append_pair(tag, value)
        self.pairs = []
        return m
########################################################################
| mit |
ajs-sun/linux | tools/perf/scripts/python/sctop.py | 1996 | 2102 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
# Optional command-line arguments: a process name to filter on and/or a
# refresh interval in seconds.  A single argument is treated as the
# interval when numeric, otherwise as the comm filter.
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
	sys.exit(usage)
if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		interval = int(sys.argv[1])
	except ValueError:
		for_comm = sys.argv[1]
		interval = default_interval
# Nested dict that auto-creates missing keys; leaves become counters.
syscalls = autodict()
def trace_begin():
	# Called once by perf before event processing starts: spawn the
	# periodic display loop on a background thread.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, id, args):
	# Count one entry per syscall id, optionally restricted to the
	# process name given on the command line.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit for this id: the autodict leaf is not yet an int.
		syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Legacy per-syscall tracepoint handler (no callchain argument).
	# The previous raw_syscalls__sys_enter(**locals()) call raised
	# TypeError because the raw handler also requires
	# common_callchain, which this signature does not receive; forward
	# the arguments explicitly with an empty callchain instead.
	raw_syscalls__sys_enter(event_name, context, common_cpu,
		common_secs, common_nsecs, common_pid, common_comm,
		[], id, args)
def print_syscall_totals(interval):
	# Background-thread loop: every `interval` seconds clear the
	# terminal and print per-syscall counts, highest count first,
	# then reset the counters.  (Python 2 print statements and
	# tuple-argument lambda; this script targets Py2.)
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
			"----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
			reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				# Entry still unresolved to an int; skip it.
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
hoangminhitvn/flask | flask/lib/python2.7/site-packages/werkzeug/_compat.py | 448 | 6184 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
# True when running under Python 2; selects the compat shims below.
PY2 = sys.version_info[0] == 2
# No-op used where Python 3 needs no wrapper for a Python 2 shim.
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
int_to_byte = chr
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
iter_bytes = lambda x: iter(x)
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return lambda x: x
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return str(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
def iter_bytes(b):
return map(int_to_byte, b)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return lambda x: x
return lambda x: x.encode('latin1')
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
try_coerce_native = _identity
def wsgi_get_bytes(s):
return s.encode('latin1')
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s.decode('latin1', errors)
return s.encode(charset).decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
               allow_none_charset=False):
    # Coerce *x* to a text string.  None passes through unchanged;
    # non-bytes values are converted with text_type(); bytes are decoded
    # with *charset* unless charset is None and allow_none_charset is
    # set, in which case the raw bytes are returned as-is.
    if x is None:
        return None
    if not isinstance(x, bytes):
        return text_type(x)
    if charset is None and allow_none_charset:
        return x
    return x.decode(charset, errors)
| bsd-3-clause |
sogelink/ansible | lib/ansible/modules/network/layer3/net_vrf.py | 96 | 1854 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_vrf
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VRFs on network devices
description:
- This module provides declarative management of VRFs
on network devices.
options:
name:
description:
- Name of the VRF.
interfaces:
description:
- List of interfaces the VRF should be configured on.
aggregate:
description: List of VRFs definitions
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the VRF configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: Create VRF named MANAGEMENT
net_vrf:
name: MANAGEMENT
- name: remove VRF named MANAGEMENT
net_vrf:
name: MANAGEMENT
state: absent
- name: Create aggregate of VRFs with purge
net_vrf:
aggregate:
- { name: test4, rd: "1:204" }
- { name: test5, rd: "1:205" }
state: present
purge: yes
- name: Delete aggregate of VRFs
net_vrf:
aggregate:
- name: test2
- name: test3
- name: test4
- name: test5
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- vrf definition MANAGEMENT
"""
| gpl-3.0 |
popazerty/e2-gui | lib/python/Plugins/Extensions/MovieBrowser/plugin.py | 6 | 300332 | # 2013.06.25 13:29:48 CEST
#Embedded file name: /usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/MovieBrowser.py
from Components.ActionMap import ActionMap
from Components.config import config, configfile, ConfigDirectory, ConfigSlider, ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.FileList import FileList
from Components.Label import Label
from Components.Language import language
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.Pixmap import Pixmap
from Components.ProgressBar import ProgressBar
from Components.Sources.List import List
from enigma import eListboxPythonMultiContent, eConsoleAppContainer, ePoint, eServiceReference, eTimer, getDesktop, gFont, loadPic, loadPNG, RT_HALIGN_LEFT
from Plugins.Plugin import PluginDescriptor
from re import findall, search, split, sub
from Screens.ChannelSelection import ChannelSelection
from Screens.InfoBar import MoviePlayer as OrgMoviePlayer
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.VirtualKeyBoard import VirtualKeyBoard
from string import find
from Tools.Directories import fileExists
from twisted.web import client, error
from twisted.web.client import getPage
from urllib2 import Request, urlopen, URLError, HTTPError
import os, re, statvfs, socket, sys, time, urllib
from os import system, walk
config.plugins.moviebrowser = ConfigSubsection()
config.plugins.moviebrowser.style = ConfigSelection(default='backdrop', choices=[('backdrop', _('Backdrop')), ('posterwall', _('Posterwall'))])
config.plugins.moviebrowser.moviefolder = ConfigDirectory(default='/media/')
config.plugins.moviebrowser.cachefolder = ConfigSelection(default='/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/cache', choices=[('/media/usb/moviebrowser/cache', _('/media/usb')), ('/media/hdd/moviebrowser/cache', _('/media/hdd')), ('/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/cache', _('Default'))])
config.plugins.moviebrowser.database = ConfigSelection(default='tmdb', choices=[('tmdb', _('TMDb')), ('imdb', _('IMDb')), ('tvdb', _('TheTVDb'))])
config.plugins.moviebrowser.language = ConfigSelection(default='en', choices=[('de', _('Deutsch')),
('en', _('English')),
('es', _('Espanol')),
('ru', _('P\xd1\x83\xd1\x81\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9'))])
deskWidth = getDesktop(0).size().width()
if deskWidth == 1280:
config.plugins.moviebrowser.plugin_size = ConfigSelection(default='full', choices=[('full', _('Plugin Full')), ('normal', _('Plugin Normal'))])
else:
config.plugins.moviebrowser.plugin_size = ConfigSelection(default='normal', choices=[('full', _('Plugin Full')), ('normal', _('Plugin Normal'))])
config.plugins.moviebrowser.filter = ConfigSelection(default=':::', choices=[(':::', _('Movies + Series')), (':::Movie:::', _('Movies')), (':::Series:::', _('Series'))])
config.plugins.moviebrowser.backdrops = ConfigSelection(default='show', choices=[('show', _('Show')), ('hide', _('Hide'))])
config.plugins.moviebrowser.plotfull = ConfigSelection(default='hide', choices=[('show', _('Show')), ('hide', _('Hide'))])
config.plugins.moviebrowser.plotfont = ConfigSelection(default='normal', choices=[('normal', _('Normal')), ('small', _('Small'))])
config.plugins.moviebrowser.sortorder = ConfigSelection(default='name', choices=[('name', _('Movie Title A-Z')),
('name_reverse', _('Movie Title Z-A')),
('rating', _('Movie Rating 0-10')),
('rating_reverse', _('Movie Rating 10-0')),
('year', _('Movie Release Date Ascending')),
('year_reverse', _('Movie Release Date Descending')),
('date', _('File Creation Date Ascending')),
('date_reverse', _('File Creation Date Descending')),
('folder', _('Movie Folder Ascending')),
('folder_reverse', _('Movie Folder Descending'))])
config.plugins.moviebrowser.reset = ConfigSelection(default='no', choices=[('no', _('No')), ('yes', _('Yes'))])
config.plugins.moviebrowser.menu = ConfigSelection(default='no', choices=[('no', _('No')), ('yes', _('Yes'))])
config.plugins.moviebrowser.showtv = ConfigSelection(default='show', choices=[('show', _('Show')), ('hide', _('Hide'))])
config.plugins.moviebrowser.m1v = ConfigSelection(default='no', choices=[('no', _('No')), ('yes', _('Yes'))])
config.plugins.moviebrowser.transparency = ConfigSlider(default=200, limits=(0, 255))
config.plugins.moviebrowser.color = ConfigSelection(default='#007895BC', choices=[('#007895BC', _('Default')),
('#00F0A30A', _('Amber')),
('#00825A2C', _('Brown')),
('#000050EF', _('Cobalt')),
('#00911D10', _('Crimson')),
('#001BA1E2', _('Cyan')),
('#00008A00', _('Emerald')),
('#0070AD11', _('Green')),
('#006A00FF', _('Indigo')),
('#00A4C400', _('Lime')),
('#00A61D4D', _('Magenta')),
('#0076608A', _('Mauve')),
('#006D8764', _('Olive')),
('#00C3461B', _('Orange')),
('#00F472D0', _('Pink')),
('#00E51400', _('Red')),
('#007A3B3F', _('Sienna')),
('#00647687', _('Steel')),
('#00149BAF', _('Teal')),
('#006C0AAB', _('Violet')),
('#00BF9217', _('Yellow'))])
def applySkinVars(skin, dict):
    """Return *skin* with every '{key}' placeholder replaced by dict[key].

    Keys whose value cannot be substituted (e.g. a non-string value) are
    skipped after logging the error; the remaining keys are still applied.
    """
    # Iterate items() instead of keys() to avoid a second dict lookup.
    for key, value in dict.items():
        try:
            skin = skin.replace('{' + key + '}', value)
        except Exception as e:
            # Fixed: the original used the Python-2-only statement
            # `print e, '@key=', key`; this form works on Python 2 and 3.
            print('%s @key= %s' % (e, key))
    return skin
def transHTML(text):
    """Normalize scraped HTML text: decode entities, strip markup artifacts
    and map umlauts/special characters to the byte sequences expected by the
    renderer.

    NOTE(review): several replace() pairs appear to have had their HTML
    entities decoded by a tooling pass (source and target look identical,
    e.g. '"' -> '"'); the chains are kept verbatim — confirm against the
    original plugin source before changing any of them.
    """
    # Pass 1: entity/typographic cleanup (e.g. 'ß' -> 'ss', dashes, quotes).
    text = text.replace(' ', ' ').replace('ß', 'ss').replace('"', '"').replace('–', '-').replace('Ø', '').replace('„', '"').replace('“', '"').replace('’', "'").replace('>', '>').replace('<', '<')
    # Pass 2: lowercase umlauts/accents to UTF-8 byte sequences.
    # NOTE(review): '©.*' is a literal str.replace argument, not a regex —
    # it only matches that exact 3-character sequence.
    text = text.replace('©.*', ' ').replace('&', '&').replace('ü', '\xc3\x83\xc2\xbc').replace('ä', '\xc3\x83\xc2\xa4').replace('ö', '\xc3\x83\xc2\xb6').replace('é', '\xc3\xa9').replace('…', '...').replace('è', '\xc3\xa8').replace('à', '\xc3\xa0')
    # Pass 3: uppercase umlauts, remaining quotes/ampersands and ellipses.
    text = text.replace('Ü', 'Ue').replace('Ä', 'Ae').replace('Ö', 'Oe').replace('"', '"').replace('"', '"').replace('&', 'und').replace(''', "'").replace(''', "'").replace('…', '...').replace('Ä', '\xc3\x83\xe2\x80\x9e').replace('Ö', '\xc3\x83-').replace('Ü', '\xc3\x83\xc5\x93').replace('ß', '\xc3\x83\xc5\xb8').replace('ä', '\xc3\x83\xc2\xa4').replace('ö', '\xc3\x83\xc2\xb6').replace('ü', '\xc3\x83\xc2\xbc')
    return text
class MoviePlayer(OrgMoviePlayer):
    """Thin wrapper around the stock movie player.

    Uses the standard "MoviePlayer" skin and closes itself (returning to the
    browser) instead of showing the movie list when playback reaches EOF.
    """

    def __init__(self, session, service):
        self.session = session
        OrgMoviePlayer.__init__(self, session, service)
        self.skinName = "MoviePlayer"
        # Class-level flag on the stock player: do not stop/close on its own.
        OrgMoviePlayer.WithoutStopClose = True

    def doEofInternal(self, playing):
        # End of file reached -> leave the player immediately.
        self.leavePlayer()

    def leavePlayer(self):
        # Simply close; the browser screen underneath takes over again.
        self.close()
class movieBrowserBackdrop(Screen):
skin = '\n\t\t\t<screen position="center,center" size="1024,576" flags="wfNoBorder" title=" " >\n\t\t\t\t<widget name="backdrop" position="0,0" size="1024,576" alphatest="on" transparent="0" zPosition="1" />\n\t\t\t\t<widget name="infoback" position="15,15" size="460,400" alphatest="blend" transparent="1" zPosition="2" />\n\t\t\t\t<widget name="plotfullback" position="549,15" size="460,400" alphatest="blend" transparent="1" zPosition="2" />\n\n\t\t\t\t<widget name="name" position="25,16" size="440,55" font="Regular;24" foregroundColor="#FFFFFF" valign="center" transparent="1" zPosition="3" />\n\t\t\t\t<eLabel text="Rating:" position="25,70" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="4" />\n\t\t\t\t<widget name="ratings" position="25,100" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings.png" borderWidth="0" orientation="orHorizontal" transparent="1" zPosition="5" />\n\t\t\t\t<widget name="ratingsback" position="25,100" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings_back.png" alphatest="on" zPosition="6" />\n\t\t\t\t<widget name="ratingtext" position="245,100" size="40,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="7" />\n\t\t\t\t<eLabel text="Director:" position="25,140" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="8" />\n\t\t\t\t<widget name="director" position="25,170" size="285,50" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="9" />\n\t\t\t\t<eLabel text="Country:" position="320,140" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="10" />\n\t\t\t\t<widget name="country" position="320,170" size="125,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="11" />\n\t\t\t\t<eLabel text="Actors:" position="25,210" size="125,25" font="Regular;20" 
halign="left" foregroundColor="{color}" transparent="1" zPosition="12" />\n\t\t\t\t<widget name="actors" position="25,240" size="285,95" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="13" />\n\t\t\t\t<eLabel text="Year:" position="320,210" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="14" />\n\t\t\t\t<widget name="year" position="320,240" size="125,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="15" />\n\t\t\t\t<eLabel text="Runtime:" position="320,280" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="16" />\n\t\t\t\t<widget name="runtime" position="320,310" size="125,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="17" />\n\t\t\t\t<eLabel text="Genres:" position="25,350" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="18" />\n\t\t\t\t<widget name="genres" position="25,380" size="440,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="19" />\n\t\t\t\t<widget name="plotfull" position="559,22" size="440,390" font="{font}" foregroundColor="#FFFFFF" transparent="1" zPosition="20" />\n\t\t\t\t<widget name="eposter" position="25,50" size="440,330" alphatest="on" transparent="1" zPosition="21" />\n\n\t\t\t\t<widget name="poster0" position="-42,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back0" position="-42,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster1" position="55,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back1" position="55,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" 
/>\n\t\t\t\t<widget name="poster2" position="152,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back2" position="152,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster3" position="249,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back3" position="249,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster4" position="346,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back4" position="346,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster5" position="443,352" size="138,207" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster6" position="586,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back6" position="586,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster7" position="683,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back7" position="683,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster8" position="780,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back8" position="780,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" 
pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster9" position="877,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back9" position="877,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t\t<widget name="poster10" position="974,426" size="92,138" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back10" position="974,426" size="92,138" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_back.png" />\n\t\t\t</screen>'
skinHD = '\n\t\t\t<screen position="center,center" size="1280,720" flags="wfNoBorder" title=" " >\n\t\t\t\t<widget name="backdrop" position="0,0" size="1280,720" alphatest="on" transparent="0" zPosition="1" />\n\t\t\t\t<widget name="infoback" position="25,25" size="525,430" alphatest="blend" transparent="1" zPosition="2" />\n\t\t\t\t<widget name="plotfullback" position="730,25" size="525,430" alphatest="blend" transparent="1" zPosition="2" />\n\n\t\t\t\t<widget name="name" position="40,30" size="495,70" font="Regular;28" foregroundColor="#FFFFFF" valign="center" transparent="1" zPosition="3" />\n\t\t\t\t<eLabel text="Rating:" position="40,100" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="4" />\n\t\t\t\t<widget name="ratings" position="40,130" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings.png" borderWidth="0" orientation="orHorizontal" transparent="1" zPosition="5" />\n\t\t\t\t<widget name="ratingsback" position="40,130" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings_back.png" alphatest="on" zPosition="6" />\n\t\t\t\t<widget name="ratingtext" position="260,130" size="50,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="7" />\n\t\t\t\t<eLabel text="Director:" position="40,170" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="8" />\n\t\t\t\t<widget name="director" position="40,200" size="320,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="9" />\n\t\t\t\t<eLabel text="Country:" position="370,170" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="10" />\n\t\t\t\t<widget name="country" position="370,200" size="125,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="11" />\n\t\t\t\t<eLabel text="Actors:" position="40,240" size="125,28" font="Regular;22" 
halign="left" foregroundColor="{color}" transparent="1" zPosition="12" />\n\t\t\t\t<widget name="actors" position="40,270" size="320,102" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="13" />\n\t\t\t\t<eLabel text="Year:" position="370,240" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="14" />\n\t\t\t\t<widget name="year" position="370,270" size="125,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="15" />\n\t\t\t\t<eLabel text="Runtime:" position="370,310" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="16" />\n\t\t\t\t<widget name="runtime" position="370,340" size="125,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="17" />\n\t\t\t\t<eLabel text="Genres:" position="40,380" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="18" />\n\t\t\t\t<widget name="genres" position="40,410" size="500,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="19" />\n\t\t\t\t<widget name="plotfull" position="745,40" size="495,393" font="{font}" foregroundColor="#FFFFFF" transparent="1" zPosition="20" />\n\t\t\t\t<widget name="eposter" position="37,53" size="500,375" alphatest="on" transparent="1" zPosition="21" />\n\n\t\t\t\t<widget name="poster0" position="-65,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back0" position="-65,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster1" position="40,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back1" position="40,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" 
pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster2" position="145,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back2" position="145,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster3" position="250,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back3" position="250,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster4" position="355,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back4" position="355,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster5" position="460,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back5" position="460,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster6" position="565,455" size="150,225" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster7" position="720,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back7" position="720,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster8" position="825,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back8" 
position="825,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster9" position="930,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back9" position="930,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster10" position="1035,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back10" position="1035,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster11" position="1140,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back11" position="1140,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t\t<widget name="poster12" position="1245,535" size="100,150" zPosition="21" transparent="1" alphatest="on" />\n\t\t\t\t<widget name="poster_back12" position="1245,535" size="100,150" zPosition="22" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />\n\t\t\t</screen>'
    def __init__(self, session, index, content, filter):
        """Backdrop-view browser screen.

        index:   position in the movie list to show first
        content: content-type tag matched against database lines
        filter:  substring filter matched against database lines
        """
        # Pick the full-HD or the smaller (XD) skin variant and inject the
        # configured accent color and plot font via applySkinVars().
        if config.plugins.moviebrowser.plugin_size.value == 'full':
            self.xd = False
            color = config.plugins.moviebrowser.color.value
            if config.plugins.moviebrowser.plotfont.value == 'normal':
                font = 'Regular;22'
            else:
                font = 'Regular;20'
            self.dict = {'color': color,
             'font': font}
            self.skin = applySkinVars(movieBrowserBackdrop.skinHD, self.dict)
        else:
            self.xd = True
            color = config.plugins.moviebrowser.color.value
            if config.plugins.moviebrowser.plotfont.value == 'normal':
                font = 'Regular;20'
            else:
                font = 'Regular;18'
            self.dict = {'color': color,
             'font': font}
            self.skin = applySkinVars(movieBrowserBackdrop.skin, self.dict)
        Screen.__init__(self, session)
        # Remember the running service so it can be restored on exit.
        self.oldService = self.session.nav.getCurrentlyPlayingServiceReference()
        self.hideflag = True
        self.ready = False      # set True once the database has been loaded
        self.renew = False
        self.update = False
        self.tmdbposter = False
        self.content = content
        self.filter = filter
        # Query-string language suffix for the metadata lookups.
        if config.plugins.moviebrowser.language.value == 'de':
            self.language = '&language=de'
        elif config.plugins.moviebrowser.language.value == 'es':
            self.language = '&language=es'
        elif config.plugins.moviebrowser.language.value == 'ru':
            self.language = '&language=ru'
        else:
            self.language = '&language=en'
        # Which metadata backend to try first (tmdb / imdb / tvdb).
        if config.plugins.moviebrowser.database.value == 'tmdb':
            self.firstdatabase = 'tmdb'
        elif config.plugins.moviebrowser.database.value == 'imdb':
            self.firstdatabase = 'imdb'
        else:
            self.firstdatabase = 'tvdb'
        # Parallel per-movie lists, all indexed by the same movie index.
        self.namelist = []
        self.movielist = []
        self.datelist = []
        self.infolist = []
        self.plotlist = []
        self.posterlist = []
        self.backdroplist = []
        self.contentlist = []
        # Widgets referenced by the skin.
        self['name'] = Label()
        self['director'] = Label()
        self['actors'] = Label()
        self['year'] = Label()
        self['runtime'] = Label()
        self['country'] = Label()
        self['genres'] = Label()
        self['ratingtext'] = Label()
        self['ratings'] = ProgressBar()
        self['ratings'].hide()
        self['ratingsback'] = Pixmap()
        self['ratingsback'].hide()
        self['infoback'] = Pixmap()
        self['backdrop'] = Pixmap()
        if config.plugins.moviebrowser.backdrops.value == 'show':
            self.backdrops = True
        else:
            self.backdrops = False
        if config.plugins.moviebrowser.plotfull.value == 'show':
            self.plotfull = True
        else:
            self.plotfull = False
        self['plotfull'] = Label()
        self['plotfull'].hide()
        self['plotfullback'] = Pixmap()
        self['plotfullback'].hide()
        # Poster strip at the bottom (poster5/6 is the enlarged, selected one).
        self['poster0'] = Pixmap()
        self['poster1'] = Pixmap()
        self['poster2'] = Pixmap()
        self['poster3'] = Pixmap()
        self['poster4'] = Pixmap()
        self['poster5'] = Pixmap()
        self['poster6'] = Pixmap()
        self['poster7'] = Pixmap()
        self['poster8'] = Pixmap()
        self['poster9'] = Pixmap()
        self['poster10'] = Pixmap()
        self['poster_back0'] = Pixmap()
        self['poster_back1'] = Pixmap()
        self['poster_back2'] = Pixmap()
        self['poster_back3'] = Pixmap()
        self['poster_back4'] = Pixmap()
        self['poster_back7'] = Pixmap()
        self['poster_back8'] = Pixmap()
        self['poster_back9'] = Pixmap()
        self['poster_back10'] = Pixmap()
        # XD skin shows 11 posters (selected slot = 5); HD shows 13 (slot 6).
        if self.xd == True:
            self.index = index
            self.posterindex = 5
            self.posterALL = 11
            self['poster_back6'] = Pixmap()
        else:
            self.index = index
            self.posterindex = 6
            self.posterALL = 13
            self['poster11'] = Pixmap()
            self['poster12'] = Pixmap()
            self['poster_back5'] = Pixmap()
            self['poster_back11'] = Pixmap()
            self['poster_back12'] = Pixmap()
        self['eposter'] = Pixmap()
        self['eposter'].hide()
        # Key bindings. NOTE: 'contextMenu' is bound to self.config, a method
        # of this screen (it shadows the module-level `config` object only
        # inside that binding's name, not here).
        self['actions'] = ActionMap(['OkCancelActions',
         'DirectionActions',
         'ColorActions',
         'ChannelSelectBaseActions',
         'HelpActions',
         'InfobarMovieListActions',
         'InfobarTeletextActions',
         'MovieSelectionActions',
         'MoviePlayerActions',
         'NumberActions'], {'ok': self.ok,
         'cancel': self.exit,
         'right': self.rightDown,
         'left': self.leftUp,
         'down': self.down,
         'up': self.up,
         'nextBouquet': self.zap,
         'prevBouquet': self.zap,
         'red': self.deleteMovie,
         'yellow': self.renewIMDb,
         'green': self.renewTMDb,
         #'blue': self.hideScreen,
         'contextMenu': self.config,
         'showEventInfo': self.togglePlotFull,
         'startTeletext': self.editDatabase,
         'leavePlayer': self.toggleBackdrops,
         'movieList': self.updateDatabase,
         '1': self.showMovies,
         '2': self.switchView,
         '3': self.showPath,
         '4': self.filterSeasons,
         '5': self.toogleContent,
         #'6': self.wikipedia,
         '7': self.filterDirector,
         '8': self.filterActor,
         '9': self.filterGenre,
         '0': self.gotoEnd,
         #'displayHelp': self.infoScreen
         }, -1)
        # Make sure the db/cache directories exist (mkdir is a no-op if they do).
        cmd = "mkdir /usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/;mkdir /usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/cache"
        os.system(cmd)
        # Marker/db file locations used throughout the screen.
        self.updatefile = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/update'
        self.blacklist = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/blacklist'
        self.database = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/database'
        self.onLayoutFinish.append(self.onLayoutFinished)
def onLayoutFinished(self):
if config.plugins.moviebrowser.showtv.value == 'hide':
self.session.nav.stopService()
if config.plugins.moviebrowser.m1v.value == 'yes':
self.session.nav.stopService()
f = open('/proc/stb/video/alpha', 'w')
f.write('%i' % config.plugins.moviebrowser.transparency.value)
f.close()
if self.xd == False:
self.infoBackPNG = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/info_backHD.png'
InfoBack = loadPic(self.infoBackPNG, 525, 430, 3, 0, 0, 1)
else:
self.infoBackPNG = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/info_back.png'
InfoBack = loadPic(self.infoBackPNG, 460, 400, 3, 0, 0, 1)
if InfoBack != None:
self['infoback'].instance.setPixmap(InfoBack)
self['infoback'].show()
if fileExists(self.database):
if fileExists(self.updatefile):
self.sortDatabase()
os.remove(self.updatefile)
self.reset = False
self.makeMovieBrowserTimer = eTimer()
self.makeMovieBrowserTimer.callback.append(self.makeMovies(self.filter))
self.makeMovieBrowserTimer.start(500, True)
else:
self.openTimer = eTimer()
self.openTimer.callback.append(self.openInfo)
self.openTimer.start(500, True)
def openInfo(self):
if fileExists('/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset'):
self.session.openWithCallback(self.reset_return, MessageBox, _('\nThe Movie Browser Database will be built now. This can take several minutes, depending on how many movies you have.\n\nBuild Movie Browser Database now?'), MessageBox.TYPE_YESNO)
else:
self.session.openWithCallback(self.first_return, MessageBox, _('\nBefore the Movie Browser Database is built for the first time, you should check your Movie Folder settings and change the Cache Folder to a hard drive disk for faster access or to a USB stick.'), MessageBox.TYPE_YESNO)
def first_return(self, answer):
if answer is True:
open('/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset', 'w').close()
self.session.openWithCallback(self.exit, movieBrowserConfig)
else:
self.close()
def reset_return(self, answer):
if answer is True:
self.reset = True
if fileExists('/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset'):
os.remove('/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset')
self.resetTimer = eTimer()
self.resetTimer.callback.append(self.database_return(True))
self.resetTimer.start(500, True)
else:
self.close()
def makeMovies(self, filter):
self.namelist = []
self.movielist = []
self.datelist = []
self.infolist = []
self.plotlist = []
self.posterlist = []
self.backdroplist = []
self.contentlist = []
self.filter = filter
if fileExists(self.database):
f = open(self.database, 'r')
for line in f:
if self.content in line and filter in line:
movieline = line.split(':::')
try:
name = movieline[0]
except IndexError:
name = ' '
try:
filename = movieline[1]
except IndexError:
filename = ' '
try:
date = movieline[2]
except IndexError:
date = ' '
try:
runtime = movieline[3]
except IndexError:
runtime = ' '
try:
rating = movieline[4]
except IndexError:
rating = ' '
try:
director = movieline[5]
except IndexError:
director = ' '
try:
actors = movieline[6]
except IndexError:
actors = ' '
try:
genres = movieline[7]
except IndexError:
genres = ' '
try:
year = movieline[8]
except IndexError:
year = ' '
try:
country = movieline[9]
except IndexError:
country = ' '
try:
plotfull = movieline[10]
except IndexError:
plotfull = ' '
try:
poster = movieline[11]
except IndexError:
poster = 'http://cf2.imgobject.com/t/p/w154' + '/default_poster.png'
try:
backdrop = movieline[12]
except IndexError:
backdrop = 'http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png'
try:
content = movieline[13]
except IndexError:
content = 'Series'
self.namelist.append(name)
self.movielist.append(filename)
self.datelist.append(date)
res = []
res.append(runtime)
res.append(rating)
res.append(director)
res.append(actors)
res.append(genres)
res.append(year)
res.append(country)
self.infolist.append(res)
self.plotlist.append(plotfull)
self.posterlist.append(poster)
self.backdroplist.append(backdrop)
self.contentlist.append(content)
f.close()
self.maxentry = len(self.namelist)
if self.maxentry == 0:
self.ready = True
size = os.path.getsize(self.database)
if size < 10:
os.remove(self.database)
else:
self.makePoster()
if self.backdrops == True:
try:
self.showBackdrops(self.index)
except IndexError:
pass
else:
self.hideBackdrops()
try:
self.makeName(self.index)
except IndexError:
pass
try:
self.makeInfo(self.index)
except IndexError:
pass
if self.plotfull == True:
try:
self.showPlotFull(self.index)
except IndexError:
pass
self.ready = True
def updateDatabase(self):
if self.ready == True:
if os.path.exists(config.plugins.moviebrowser.moviefolder.value):
self.session.openWithCallback(self.database_return, MessageBox, _('\nUpdate Movie Browser Database?'), MessageBox.TYPE_YESNO)
else:
self.session.open(MessageBox, _('\nMovie Folder %s not reachable.\nMovie Browser Database Update canceled.') % str(config.plugins.moviebrowser.moviefolder.value), MessageBox.TYPE_ERROR)
    def database_return(self, answer):
        """Callback of the update dialog: prune orphans, then scan for new files.

        Phase 1: drop database lines whose file no longer exists under the
        movie folder (matched by creation timestamp). Phase 2: walk the movie
        folder, queue every video file not yet in the database (or blacklist)
        and kick off the metadata lookup chain for the first one.
        """
        if answer is True:
            # Marker file: tells the next start that the db needs re-sorting.
            open(self.updatefile, 'w').close()
            self.update = True
            self.ready = False
            self.namelist = []
            self.movielist = []
            self.datelist = []
            self.infolist = []
            self.plotlist = []
            self.posterlist = []
            self.backdroplist = []
            self.orphaned = 0
            if fileExists(self.database):
                # Concatenate the ctimes of all files under the movie folder;
                # a db line whose stored ctime is absent here is an orphan.
                allfiles = ':::'
                folder = config.plugins.moviebrowser.moviefolder.value
                for root, dirs, files in os.walk(folder, topdown=False):
                    for name in files:
                        filename = os.path.join(root, name)
                        filedate = os.path.getctime(filename)
                        allfiles = allfiles + str(filedate)
                data = open(self.database).read()
                for line in data.split('\n'):
                    movieline = line.split(':::')
                    try:
                        moviefolder = movieline[1]
                        moviedate = movieline[2]
                    except IndexError:
                        moviefolder = ''
                        moviedate = ''
                    # NOTE(review): both arguments are used as regex patterns
                    # here (re.search), not literal substrings — preserved.
                    if search(config.plugins.moviebrowser.moviefolder.value, moviefolder) is not None and search(moviedate, allfiles) is None:
                        self.orphaned += 1
                        data = data.replace(line + '\n', '')
                # Keep a backup of the pre-prune database, write pruned data.
                os.rename(self.database, self.database + '-backup')
                f = open(self.database, 'w')
                f.write(data)
                f.close()
                del allfiles
                data = open(self.database).read()
            else:
                open(self.database, 'w').close()
                data = ''
            # Blacklisted entries count as "already known" during the scan.
            if fileExists(self.blacklist):
                blacklist = open(self.blacklist).read()
                alldata = data + blacklist
            else:
                alldata = data
            folder = config.plugins.moviebrowser.moviefolder.value
            for root, dirs, files in os.walk(folder, topdown=False):
                for name in files:
                    # Escape parentheses so the filename is usable as a regex.
                    movie = sub('\\(', '.', name)
                    movie = sub('\\)', '.', movie)
                    if search(movie, alldata) is None:
                        if name.endswith('.ts') or name.endswith('.avi') or name.endswith('.divx') or name.endswith('.flv') or name.endswith('.iso') or name.endswith('.ISO') or name.endswith('.m2ts') or name.endswith('.mov') or name.endswith('.mp4') or name.endswith('.mpg') or name.endswith('.mpeg') or name.endswith('.mkv') or name.endswith('.vob'):
                            filename = os.path.join(root, name)
                            self.movielist.append(filename)
                            self.datelist.append(os.path.getctime(filename))
                            # Derive the lookup title from the filename:
                            # recordings ('.ts') drop the 'channel - date - '
                            # prefix, everything else just drops the suffix.
                            if name.endswith('.ts'):
                                name = sub('.*? - .*? - ', '', name)
                                name = sub('[.]ts', '', name)
                            else:
                                name = sub('[.]avi', '', name)
                                name = sub('[.]divx', '', name)
                                name = sub('[.]flv', '', name)
                                name = sub('[.]iso', '', name)
                                name = sub('[.]ISO', '', name)
                                name = sub('[.]m2ts', '', name)
                                name = sub('[.]mov', '', name)
                                name = sub('[.]mp4', '', name)
                                name = sub('[.]mpg', '', name)
                                name = sub('[.]mpeg', '', name)
                                name = sub('[.]mkv', '', name)
                                name = sub('[.]vob', '', name)
                            print name
                            self.namelist.append(name)
            self.dbcount = 1
            self.dbcountmax = len(self.movielist)
            if self.dbcountmax == 0:
                self.finished_update(False)
            else:
                # Start the lookup chain for the first queued file with the
                # configured primary metadata backend.
                self.name = self.namelist[0]
                if config.plugins.moviebrowser.database.value == 'tmdb':
                    movie = self.name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
                    self.firstdatabase = 'tmdb'
                    url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
                    self.getTMDbData(url, 1, '0', False)
                elif config.plugins.moviebrowser.database.value == 'imdb':
                    movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                    self.firstdatabase = 'imdb'
                    url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=1&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
                    self.getIMDbData(url, 1)
                else:
                    movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                    self.firstdatabase = 'tvdb'
                    # Strip a trailing 'SxxEyy...' episode tag before the query.
                    movie = movie + 'FIN'
                    movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
                    movie = sub('FIN', '', movie)
                    url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
                    self.getTVDbData(url, 1, '0')
def getIMDbData(self, url, runlevel):
agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
request = Request(url, headers=agents)
try:
output = urlopen(request).read()
output = output.replace('\\u00e4', '\xc3\xa4').replace('\\u00f6', '\xc3\xb6').replace('\\u00fc', '\xc3\xbc').replace('\\u00c4', '\xc3\x84').replace('\\u00f6', '\xc3\x9f').replace('\\u00dc', '\xc3\x9c').replace('\\u00df', '\xc3\x9f').replace('\\u0026', '&').replace('\\u00e9', '\xc3\xa9').replace('\\u00e5', '\xc3\xa5').replace('\\"', '').replace('&', '&')
except URLError:
output = ''
except HTTPError:
output = ''
except socket.error:
output = ''
if search('"error":"Film not found"', output) is not None and runlevel == 1:
text = self.name.replace(' ', '%20')
self.trans = 'imdb'
self.translateGoogle(text)
elif search('"error":"Film not found"', output) is not None and runlevel == 2:
movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
movie = movie + 'FIN'
movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
movie = sub('FIN', '', movie)
url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
self.getTVDbData(url, 1, '0')
else:
name = re.findall('<title>(.*?)</title>', output)
runtime = re.findall('<runtime><item>.*?([0-9]+ min).*?</item>', output)
rating = re.findall('<rating>(.*?)</rating>', output)
director = re.findall('<directors><item>(.*?)</item>', output)
actors = re.findall('<actors>(.*?)</actors>', output)
try:
actor = re.findall('<item>(.*?)</item>', actors[0])
except IndexError:
actor = []
genres = re.findall('<genres>(.*?)</genres>', output)
try:
genre = re.findall('<item>(.*?)</item>', genres[0])
except IndexError:
genre = []
year = re.findall('<year>(.*?)</year>', output)
country = re.findall('<country><item>(.*?)</item>', output)
plotfull = re.findall('<plot>(.*?)</plot>', output)
try:
self.namelist[self.dbcount - 1] = name[0]
except IndexError:
self.namelist[self.dbcount - 1] = self.name
res = []
try:
res.append(runtime[0])
except IndexError:
res.append(' ')
try:
res.append(rating[0])
except IndexError:
res.append('0.0')
try:
res.append(director[0])
except IndexError:
res.append(' ')
try:
actors = actor[0]
except IndexError:
actors = ' '
try:
actors = actors + ', ' + actor[1]
except IndexError:
pass
try:
actors = actors + ', ' + actor[2]
except IndexError:
pass
try:
actors = actors + ', ' + actor[3]
except IndexError:
pass
try:
actors = actors + ', ' + actor[4]
except IndexError:
pass
try:
actors = actors + ', ' + actor[5]
except IndexError:
pass
if len(actors) < 95:
try:
actors = actors + ', ' + actor[6]
except IndexError:
pass
res.append(actors)
try:
genres = genre[0]
except IndexError:
genres = ' '
try:
genres = genres + ', ' + genre[1]
except IndexError:
pass
try:
genres = genres + ', ' + genre[2]
except IndexError:
pass
try:
genres = genres + ', ' + genre[3]
except IndexError:
pass
try:
genres = genres + ', ' + genre[4]
except IndexError:
pass
try:
res.append(genres)
except IndexError:
res.append(' ')
try:
res.append(year[0])
except IndexError:
res.append(' ')
try:
res.append(country[0].replace('Germany', 'GER'))
except IndexError:
res.append(' ')
self.infolist.append(res)
try:
self.plotlist.append(plotfull[0].replace('\\', ''))
except IndexError:
self.plotlist.append(' ')
movie = self.name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
self.getTMDbPoster(url, 1)
def getTMDbPoster(self, url, runlevel):
self.tmdbposter = True
headers = {'Accept': 'application/json'}
request = Request(url, headers=headers)
try:
output = urlopen(request).read()
except URLError:
output = ''
except HTTPError:
output = ''
except socket.error:
output = ''
if search('"total_results":0', output) is not None and runlevel == 1:
text = self.name.replace(' ', '%20')
self.trans = 'tmdbposter'
self.translateGoogle(text)
else:
backdrop = re.findall('"backdrop_path":"(.*?)"', output)
poster = re.findall('"poster_path":"(.*?)"', output)
try:
self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + backdrop[0])
except IndexError:
self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
try:
self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + poster[0])
except IndexError:
self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
self.tmdbposter = False
self.makeDataEntry(self.dbcount - 1, True)
    def getTMDbData(self, url, runlevel, tmdbid, renew):
        """Fetch movie metadata (title, artwork, cast, genres, ...) from TMDb.

        url      -- TMDb search (or detail) URL queried first.
        runlevel -- 1 = first attempt, 2 = retry after Google translation.
        tmdbid   -- TMDb id as a string, or '0' to take it from the search reply.
        renew    -- True when refreshing an existing database entry.

        On an empty result set the lookup falls back to translateGoogle()
        (runlevel 1) or to TheTVDb (runlevel 2).  Otherwise it fills
        self.namelist / backdroplist / posterlist / infolist / plotlist and
        finishes via makeDataEntry().
        """
        headers = {'Accept': 'application/json'}
        request = Request(url, headers=headers)
        # NOTE(review): HTTPError subclasses URLError, so the HTTPError
        # handler below is unreachable; kept byte-identical here.
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            output = ''
        except socket.error:
            output = ''
        # No search hit on the first pass: retry with a translated title.
        if search('"total_results":0', output) is not None and runlevel == 1:
            text = self.name.replace(' ', '%20')
            self.trans = 'tmdb'
            self.translateGoogle(text)
        # Still no hit after translation: assume a series, ask TheTVDb.
        elif search('"total_results":0', output) is not None and runlevel == 2:
            movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
            movie = movie + 'FIN'
            # The appended 'FIN' sentinel anchors the SxxEyy-stripping regex.
            movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
            movie = sub('FIN', '', movie)
            url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
            self.getTVDbData(url, 1, '0')
        else:
            if tmdbid == '0':
                # First "id" field of the search reply is the best match.
                tmdbid = re.findall('"id":(.*?),', output)
                try:
                    tmdbid = tmdbid[0]
                except IndexError:
                    tmdbid = '0'
            name = re.findall('"title":"(.*?)"', output)
            backdrop = re.findall('"backdrop_path":"(.*?)"', output)
            year = re.findall('"release_date":"(.*?)-', output)
            poster = re.findall('"poster_path":"(.*?)"', output)
            rating = re.findall('"vote_average":(.*?),', output)
            try:
                self.namelist[self.dbcount - 1] = name[0]
            except IndexError:
                self.namelist[self.dbcount - 1] = self.name
            try:
                self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + backdrop[0])
            except IndexError:
                self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
            try:
                self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + poster[0])
            except IndexError:
                self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            # Second request: localized movie details.
            url = 'http://api.themoviedb.org/3/movie/%s?api_key=dfc629f7ff6936a269f8c5cdb194c890' % tmdbid + self.language
            headers = {'Accept': 'application/json'}
            request = Request(url, headers=headers)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            plot = re.findall('"overview":"(.*?)","', output)
            if renew == True:
                # Drop the collection sub-object so the title regex does not
                # pick up the collection's name instead of the movie's.
                output = sub('"belongs_to_collection":{.*?}', '', output)
                name = re.findall('"title":"(.*?)"', output)
                backdrop = re.findall('"backdrop_path":"(.*?)"', output)
                poster = re.findall('"poster_path":"(.*?)"', output)
            # Third request: unlocalized details (fallback for missing fields).
            url = 'http://api.themoviedb.org/3/movie/%s?api_key=dfc629f7ff6936a269f8c5cdb194c890' % tmdbid
            headers = {'Accept': 'application/json'}
            request = Request(url, headers=headers)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            output = sub('"belongs_to_collection":{.*?}', '', output)
            if not plot:
                plot = re.findall('"overview":"(.*?)","', output)
            # Genre N is extracted positionally by skipping N-1 earlier
            # "name" fields inside the genres array.
            genre = re.findall('"genres":[[]."id":[0-9]+,"name":"(.*?)"', output)
            genre2 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            genre3 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            genre4 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            genre5 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            country = re.findall('"iso_3166_1":"(.*?)"', output)
            runtime = re.findall('"runtime":(.*?),', output)
            if renew == True:
                year = re.findall('"release_date":"(.*?)-', output)
                rating = re.findall('"vote_average":(.*?),', output)
                if not backdrop:
                    backdrop = re.findall('"backdrop_path":"(.*?)"', output)
                if not poster:
                    poster = re.findall('"poster_path":"(.*?)"', output)
                try:
                    self.namelist[self.dbcount - 1] = name[0]
                except IndexError:
                    self.namelist[self.dbcount - 1] = self.name
                try:
                    self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + backdrop[0])
                except IndexError:
                    self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
                try:
                    self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + poster[0])
                except IndexError:
                    self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            # Fourth request: cast and crew.
            url = 'http://api.themoviedb.org/3/movie/%s/casts?api_key=dfc629f7ff6936a269f8c5cdb194c890' % tmdbid + self.language
            headers = {'Accept': 'application/json'}
            request = Request(url, headers=headers)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            # Actor N: skip N-1 preceding "name" fields in the cast list.
            actor = re.findall('"name":"(.*?)"', output)
            actor2 = re.findall('"name":".*?"name":"(.*?)"', output)
            actor3 = re.findall('"name":".*?"name":".*?"name":"(.*?)"', output)
            actor4 = re.findall('"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            actor5 = re.findall('"name":".*?"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            actor6 = re.findall('"name":".*?"name":".*?"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            actor7 = re.findall('"name":".*?"name":".*?"name":".*?"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            director = re.findall('"([^"]+)","department":"Directing","job":"Director"', output)
            # Assemble the info record: runtime, rating, director, actors,
            # genres, year, country — one blank-ish placeholder per miss.
            res = []
            try:
                res.append(runtime[0] + ' min')
            except IndexError:
                res.append(' ')
            try:
                res.append(rating[0])
            except IndexError:
                res.append('0.0')
            try:
                res.append(director[0])
            except IndexError:
                res.append(' ')
            try:
                actors = actor[0]
            except IndexError:
                actors = ' '
            try:
                actors = actors + ', ' + actor2[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor3[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor4[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor5[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor6[0]
            except IndexError:
                pass
            # Only add a seventh actor while the line stays skin-friendly.
            if len(actors) < 95:
                try:
                    actors = actors + ', ' + actor7[0]
                except IndexError:
                    pass
            res.append(actors)
            try:
                genres = genre[0]
            except IndexError:
                genres = ' '
            try:
                genres = genres + ', ' + genre2[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre3[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre4[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre5[0]
            except IndexError:
                pass
            res.append(genres.replace('Science Fiction', 'Sci-Fi'))
            try:
                res.append(year[0])
            except IndexError:
                res.append(' ')
            try:
                res.append(country[0].replace('US', 'USA'))
            except IndexError:
                res.append(' ')
            self.infolist.append(res)
            try:
                self.plotlist.append(plot[0].replace('\\', ''))
            except IndexError:
                self.plotlist.append(' ')
            self.makeDataEntry(self.dbcount - 1, True)
    def translateGoogle(self, text):
        """Translate *text* to English via Google's mobile translate page,
        then re-run the lookup that requested the translation.

        The source language follows the configured plugin language; the
        translation is scraped from the 'class="t0">' div of the HTML reply.
        self.trans selects which database lookup gets the translated title.
        On any network error the untranslated text is used as-is.
        """
        if config.plugins.moviebrowser.language.value == 'de':
            url = 'http://translate.google.com/m?hl=en&sl=de&q=%s' % text.title()
        elif config.plugins.moviebrowser.language.value == 'es':
            url = 'http://translate.google.com/m?hl=en&sl=es&q=%s' % text.title()
        elif config.plugins.moviebrowser.language.value == 'ru':
            url = 'http://translate.google.com/m?hl=en&sl=ru&q=%s' % text.title()
        else:
            url = 'http://translate.google.com/m?hl=en&sl=en&q=%s' % text.title()
        agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
        before_trans = 'class="t0">'
        request = Request(url, headers=agents)
        try:
            output = urlopen(request).read()
            data = output[output.find(before_trans) + len(before_trans):]
            # The translation ends at the first following HTML tag.
            movie = data.split('<')[0]
            print '%s >> %s' % (text, movie)
        except URLError:
            movie = text
        except HTTPError:
            movie = text
        except socket.error:
            movie = text
        # Dispatch the translated title back to the requesting lookup.
        if self.trans == 'imdb':
            movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
            url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=1&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
            self.getIMDbData(url, 2)
        elif self.trans == 'tmdbposter':
            movie = movie.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
            url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
            self.getTMDbPoster(url, 2)
        elif self.trans == 'tmdb':
            movie = movie.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
            url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
            self.getTMDbData(url, 2, '0', False)
        elif self.trans == 'tvdb':
            movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
            url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
            self.getTVDbData(url, 2, '0')
    def getTVDbData(self, url, runlevel, seriesid):
        """Fetch series (and, for SxxEyy-tagged names, episode) metadata
        from TheTVDb's legacy XML API.

        url      -- GetSeries.php search URL or a direct series XML URL.
        runlevel -- 1 = first attempt, 2 = retry after Google translation.
        seriesid -- TheTVDb series id, or '0' to take it from the search reply.

        On a miss it retries with a translated title (runlevel 1) or writes
        an empty placeholder record (runlevel 2); on success it fills the
        self.*list containers and finishes via makeDataEntry().
        """
        agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
        request = Request(url, headers=agents)
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            output = ''
        except socket.error:
            output = ''
        # No <Series> element in the reply: retry via translation first.
        if search('<Series>', output) is None and runlevel == 1:
            text = self.name.replace(' ', '%20')
            text = text + 'FIN'
            # The 'FIN' sentinel anchors the SxxEyy-stripping regex.
            text = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', text)
            text = sub('FIN', '', text)
            self.trans = 'tvdb'
            self.translateGoogle(text)
        elif search('<Series>', output) is None and runlevel == 2:
            # Final miss: record a placeholder entry with default artwork.
            self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
            self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            self.namelist[self.dbcount - 1] = self.name
            res = []
            res.append(' ')
            res.append('0.0')
            res.append(' ')
            res.append(' ')
            res.append(' ')
            res.append(' ')
            res.append(' ')
            self.infolist.append(res)
            self.plotlist.append(' ')
            self.makeDataEntry(self.dbcount - 1, False)
        else:
            if seriesid == '0':
                seriesid = re.findall('<seriesid>(.*?)</seriesid>', output)
                try:
                    seriesid = seriesid[0]
                except IndexError:
                    seriesid = '0'
            # Episode mode: the title carries an SxxEyy tag.
            if search('[Ss][0-9]+[Ee][0-9]+', self.name) is not None:
                data = search('([Ss][0-9]+[Ee][0-9]+)', self.name)
                data = data.group(1)
                season = search('[Ss]([0-9]+)[Ee]', data)
                season = season.group(1).lstrip('0')
                episode = search('[Ss][0-9]+[Ee]([0-9]+)', data)
                episode = episode.group(1).lstrip('0')
                url = 'http://www.thetvdb.com/api/D19315B88B2DE21F/series/' + seriesid + '/default/' + season + '/' + episode + '/' + config.plugins.moviebrowser.language.value + '.xml'
                agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
                request = Request(url, headers=agents)
                try:
                    output = urlopen(request).read()
                except URLError:
                    output = ''
                except HTTPError:
                    output = ''
                except socket.error:
                    output = ''
                output = sub('&', '&', output)
                episode = re.findall('<EpisodeName>(.*?)</EpisodeName>', output)
                year = re.findall('<FirstAired>([0-9]+)-', output)
                guest = re.findall('<GuestStars>[|](.*?)[|]</GuestStars>', output)
                director = re.findall('<Director>[|](.*?)[|]', output)
                if not director:
                    director = re.findall('<Director>(.*?)</Director>', output)
                plotfull = re.findall('<Overview>(.*?)</Overview>', output, re.S)
                rating = re.findall('<Rating>(.*?)</Rating>', output)
                eposter = re.findall('<filename>(.*?)</filename>', output)
            else:
                # No episode tag: start with empty episode-level fields.
                data = ''
                episode = []
                year = []
                guest = []
                director = []
                plotfull = []
                rating = []
                eposter = []
            # Series-level XML (always fetched; fills whatever is missing).
            url = 'http://www.thetvdb.com/data/series/' + seriesid + '/' + config.plugins.moviebrowser.language.value + '.xml'
            agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
            request = Request(url, headers=agents)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            output = sub('&', '&', output)
            name = re.findall('<SeriesName>(.*?)</SeriesName>', output)
            runtime = re.findall('<Runtime>(.*?)</Runtime>', output)
            if not rating:
                rating = re.findall('<Rating>(.*?)</Rating>', output)
            actors = re.findall('<Actors>(.*?)</Actors>', output)
            # Actor/genre N: skip N-1 preceding |-delimited entries.
            try:
                actor = re.findall('[|](.*?)[|]', actors[0])
            except IndexError:
                actor = []
            try:
                actor2 = re.findall('[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor2 = []
            try:
                actor3 = re.findall('[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor3 = []
            try:
                actor4 = re.findall('[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor4 = []
            try:
                actor5 = re.findall('[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor5 = []
            try:
                actor6 = re.findall('[|].*?[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor6 = []
            try:
                actor7 = re.findall('[|].*?[|].*?[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor7 = []
            genres = re.findall('<Genre>(.*?)</Genre>', output)
            try:
                genre = re.findall('[|](.*?)[|]', genres[0])
            except IndexError:
                genre = []
            try:
                genre2 = re.findall('[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre2 = []
            try:
                genre3 = re.findall('[|].*?[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre3 = []
            try:
                genre4 = re.findall('[|].*?[|].*?[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre4 = []
            try:
                genre5 = re.findall('[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre5 = []
            if not year:
                year = re.findall('<FirstAired>([0-9]+)-', output)
            if not plotfull:
                plotfull = re.findall('<Overview>(.*?)</Overview>', output, re.S)
            backdrop = re.findall('<fanart>(.*?)</fanart>', output)
            poster = re.findall('<poster>(.*?)</poster>', output)
            try:
                if not episode:
                    self.namelist[self.dbcount - 1] = name[0].replace('Das n\xc3\xa4chste Jahrhundert', 'TNG')
                else:
                    # Episode mode: append the SxxEyy tag and episode title.
                    self.namelist[self.dbcount - 1] = name[0].replace('Das n\xc3\xa4chste Jahrhundert', 'TNG') + ' - (' + data + ') ' + episode[0]
            except IndexError:
                self.namelist[self.dbcount - 1] = self.name
            # Assemble the info record: runtime, rating, director, actors,
            # genres, year, country.
            res = []
            try:
                res.append(runtime[0] + ' min')
            except IndexError:
                res.append(' ')
            try:
                res.append(rating[0])
            except IndexError:
                res.append('0.0')
            try:
                if not director:
                    res.append('Various')
                else:
                    res.append(director[0])
            except IndexError:
                res.append('Various')
            try:
                actors = actor[0]
            except IndexError:
                actors = ' '
            try:
                actors = actors + ', ' + actor2[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor3[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor4[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor5[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor6[0]
            except IndexError:
                pass
            # Only add a seventh actor while the line stays skin-friendly.
            if len(actors) < 95:
                try:
                    actors = actors + ', ' + actor7[0]
                except IndexError:
                    pass
            res.append(actors)
            try:
                genres = genre[0]
            except IndexError:
                genres = ' '
            try:
                genres = genres + ', ' + genre2[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre3[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre4[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre5[0]
            except IndexError:
                pass
            try:
                res.append(genres.replace('Science-Fiction', 'Sci-Fi'))
            except IndexError:
                res.append(' ')
            try:
                res.append(year[0])
            except IndexError:
                res.append(' ')
            # Country is derived from the configured UI language, not the feed.
            if config.plugins.moviebrowser.language.value == 'de':
                country = 'DE'
            elif config.plugins.moviebrowser.language.value == 'es':
                country = 'ES'
            else:
                country = 'USA'
            res.append(country)
            self.infolist.append(res)
            try:
                if not guest:
                    plotfull = plotfull[0].replace('\n', '').replace('"', '"')
                else:
                    plotfull = plotfull[0].replace('\n', '').replace('"', '"')
                    plotfull = plotfull + ' Guest Stars: ' + guest[0].replace('|', ', ') + '.'
                self.plotlist.append(plotfull)
            except IndexError:
                self.plotlist.append(' ')
            try:
                self.backdroplist.append('http://www.thetvdb.com/banners/' + backdrop[0])
            except IndexError:
                self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
            try:
                if not eposter:
                    self.posterlist.append('http://www.thetvdb.com/banners/_cache/' + poster[0])
                else:
                    # Episode poster is appended after an '<episode>' marker.
                    self.posterlist.append('http://www.thetvdb.com/banners/_cache/' + poster[0] + '<episode>' + 'http://www.thetvdb.com/banners/' + eposter[0] + '<episode>')
            except IndexError:
                self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            self.makeDataEntry(self.dbcount - 1, False)
    def makeDataEntry(self, count, content):
        """Write one ':::'-separated record to the database file, then either
        continue with the next pending movie or finish/refresh the UI.

        count   -- index into the per-movie result lists.
        content -- True writes a 'Movie' record, False a 'Series' record.

        Normal mode appends a line; renew mode replaces the existing line
        whose file name matches (regex search over the whole database).
        """
        if self.renew == False:
            f = open(self.database, 'a')
            try:
                if content == True:
                    data = self.namelist[count] + ':::' + self.movielist[count] + ':::' + str(self.datelist[count]) + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Movie:::\n'
                else:
                    data = self.namelist[count] + ':::' + self.movielist[count] + ':::' + str(self.datelist[count]) + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Series:::\n'
                f.write(data)
            except IndexError:
                pass
            f.close()
        else:
            try:
                if content == True:
                    newdata = self.namelist[count] + ':::' + self.movielist[self.index] + ':::' + self.datelist[self.index] + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Movie:::'
                else:
                    newdata = self.namelist[count] + ':::' + self.movielist[self.index] + ':::' + self.datelist[self.index] + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Series:::'
            except IndexError:
                newdata = ''
            data = open(self.database).read()
            movie = self.movielist[self.index]
            # Neutralize parentheses so the file name is usable as a regex.
            movie = sub('\\(', '.', movie)
            movie = sub('\\)', '.', movie)
            if search(movie, data) is not None:
                for line in data.split('\n'):
                    if search(movie, line) is not None:
                        data = data.replace(line, newdata)
                f = open(self.database, 'w')
                f.write(data)
                f.close()
        # More pending titles: look up the next one on the first-choice DB.
        if self.dbcount < self.dbcountmax:
            self.dbcount += 1
            self.name = self.namelist[self.dbcount - 1]
            if self.firstdatabase == 'tmdb':
                movie = self.name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
                url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
                self.getTMDbData(url, 1, '0', False)
            elif self.firstdatabase == 'imdb':
                movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=1&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
                self.getIMDbData(url, 1)
            else:
                movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                movie = movie + 'FIN'
                movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
                movie = sub('FIN', '', movie)
                url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
                self.getTVDbData(url, 1, '0')
        elif self.update == True:
            if self.reset == True:
                self.session.openWithCallback(self.exit, movieBrowserBackdrop, self.index, config.plugins.moviebrowser.filter.value, config.plugins.moviebrowser.filter.value)
            else:
                self.finished_update(True)
        else:
            self.finished()
def finished(self):
if self.renew == False:
self.index = 0
if self.xd == True:
self.posterindex = 5
else:
self.posterindex = 6
self.makeMovies(self.filter)
else:
self.renew = False
self.makeMovies(self.filter)
    def finished_update(self, found):
        """Report the outcome of a database update run and refresh the list.

        found -- True when at least one new movie was imported.

        Shows a MessageBox summarizing imported / orphaned counts (singular
        and plural wordings differ), removes the update marker file, sorts
        the database when appropriate and rebuilds the movie list.
        """
        if found == False and self.orphaned == 0:
            self.session.open(MessageBox, _('\nNo new Movies found:\nYour Database is up to date.'), MessageBox.TYPE_INFO)
            os.remove(self.updatefile)
            self.makeMovies(self.filter)
        elif found == False:
            if self.orphaned == 1:
                self.session.open(MessageBox, _('\nNo new Movies found.\n%s Orphaned Movie deleted from Database.') % str(self.orphaned), MessageBox.TYPE_INFO)
            else:
                self.session.open(MessageBox, _('\nNo new Movies found.\n%s Orphaned Movies deleted from Database.') % str(self.orphaned), MessageBox.TYPE_INFO)
            os.remove(self.updatefile)
            self.makeMovies(self.filter)
        elif self.orphaned == 0:
            if self.dbcountmax == 1:
                self.session.open(MessageBox, _('\n%s Movie imported into Database.') % str(self.dbcountmax), MessageBox.TYPE_INFO)
            else:
                self.session.open(MessageBox, _('\n%s Movies imported into Database.') % str(self.dbcountmax), MessageBox.TYPE_INFO)
            if fileExists(self.updatefile):
                self.sortDatabase()
                os.remove(self.updatefile)
            self.makeMovies(self.filter)
        else:
            if self.dbcountmax == 1 and self.orphaned == 1:
                self.session.open(MessageBox, _('\n%s Movie imported into Database.\n%s Orphaned Movie deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            elif self.dbcountmax == 1:
                self.session.open(MessageBox, _('\n%s Movie imported into Database.\n%s Orphaned Movies deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            elif self.orphaned == 1:
                self.session.open(MessageBox, _('\n%s Movies imported into Database.\n%s Orphaned Movie deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            else:
                self.session.open(MessageBox, _('\n%s Movies imported into Database.\n%s Orphaned Movies deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            if fileExists(self.updatefile):
                self.sortDatabase()
                os.remove(self.updatefile)
            self.makeMovies(self.filter)
def ok(self):
if self.ready == True:
try:
filename = self.movielist[self.index]
if filename.endswith('.ts'):
sref = eServiceReference('1:0:0:0:0:0:0:0:0:0:' + filename)
sref.setName(self.namelist[self.index])
self.session.open(MoviePlayer, sref)
elif filename.endswith('.iso') or filename.endswith('.ISO'):
if os.path.exists('/usr/lib/enigma2/python/Plugins/Extensions/DVDPlayer/'):
from Plugins.Extensions.DVDPlayer.plugin import DVDPlayer
self.session.open(DVDPlayer, dvd_filelist=[filename])
else:
self.session.open(MessageBox, _('DVD Player Plugin not installed.'), MessageBox.TYPE_ERROR)
else:
sref = eServiceReference('4097:0:0:0:0:0:0:0:0:0:' + filename)
sref.setName(self.namelist[self.index])
self.session.open(MoviePlayer, sref)
except IndexError:
pass
def renewIMDb(self):
if self.ready == True:
name = self.movielist[self.index]
name = sub('.*?[/]', '', name)
if name.endswith('.ts'):
name = sub('.*? - .*? - ', '', name)
name = sub('[.]ts', '', name)
else:
name = sub('[.]avi', '', name)
name = sub('[.]divx', '', name)
name = sub('[.]flv', '', name)
name = sub('[.]iso', '', name)
name = sub('[.]ISO', '', name)
name = sub('[.]m2ts', '', name)
name = sub('[.]mov', '', name)
name = sub('[.]mp4', '', name)
name = sub('[.]mpg', '', name)
name = sub('[.]mpeg', '', name)
name = sub('[.]mkv', '', name)
name = sub('[.]vob', '', name)
self.session.openWithCallback(self.renewIMDbReturn, VirtualKeyBoard, title='Update Single Movie Data - IMDb:', text=name)
def renewIMDbReturn(self, name):
if name and name != '':
self.name = name
name = name.replace(' ', '+').replace(':', '+').replace('_', '+')
url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=10&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % name
self.getIMDbMovies(url, 1)
    def getIMDbMovies(self, url, runlevel):
        """Search IMDb (imdbapi.org) for self.name and open a selection list.

        runlevel 1 falls back to a Google-translated title on a miss;
        runlevel 2 offers a TheTVDb lookup instead.
        """
        agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
        request = Request(url, headers=agents)
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            output = ''
        except socket.error:
            output = ''
        output = output.replace('&', '&')
        # Inject a generic poster so every result row has one to show.
        output = sub('</type><imdb_id>', '</type><poster>http://profile.ak.fbcdn.net/hprofile-ak-snc7/373026_15925638948_1021284996_q.jpg</poster><imdb_id>', output)
        rating = re.findall('<rating>(.*?)</rating>', output)
        year = re.findall('<year>(.*?)</year>', output)
        titles = re.findall('<title>(.*?)</title>', output)
        poster = re.findall('<poster>(.*?)</poster>', output)
        id = re.findall('<imdb_id>(.*?)</imdb_id>', output)
        country = re.findall('<country><item>(.*?)</item>', output)
        titel = 'IMDb Results'
        if not titles and runlevel == 1:
            text = self.name.replace(' ', '%20')
            self.transrenew = 'imdb'
            self.translateRenewGoogle(text)
        elif not titles and runlevel == 2:
            self.session.openWithCallback(self.tvdb_return, MessageBox, _('\nNo IMDb Results - looking for %s on TheTVDb?') % self.name, MessageBox.TYPE_YESNO)
        else:
            self.session.openWithCallback(self.makeIMDbUpdate, moviesList, titel, rating, year, titles, poster, id, country)
def makeIMDbUpdate(self, id):
self.renew = True
self.firstdatabase = 'imdb'
self.dbcount = 1
self.dbcountmax = 1
self.infolist = []
self.plotlist = []
self.backdroplist = []
self.posterlist = []
url = 'http://imdbapi.org/?ids=%s&type=xml&plot=full&episode=0&lang=en-US&aka=simple&release=simple&business=0&tech=0' % id
self.getIMDbData(url, 1)
def renewTMDb(self):
if self.ready == True:
name = self.movielist[self.index]
name = sub('.*?[/]', '', name)
if name.endswith('.ts'):
name = sub('.*? - .*? - ', '', name)
name = sub('[.]ts', '', name)
else:
name = sub('[.]avi', '', name)
name = sub('[.]divx', '', name)
name = sub('[.]flv', '', name)
name = sub('[.]iso', '', name)
name = sub('[.]ISO', '', name)
name = sub('[.]m2ts', '', name)
name = sub('[.]mov', '', name)
name = sub('[.]mp4', '', name)
name = sub('[.]mpg', '', name)
name = sub('[.]mpeg', '', name)
name = sub('[.]mkv', '', name)
name = sub('[.]vob', '', name)
if config.plugins.moviebrowser.database.value == 'tvdb':
self.session.openWithCallback(self.renewTMDbReturn, VirtualKeyBoard, title='Update Single Series Data - TheTVDb:', text=name)
else:
self.session.openWithCallback(self.renewTMDbReturn, VirtualKeyBoard, title='Update Single Movie Data - TMDb:', text=name)
def renewTMDbReturn(self, name):
if name and name != '':
self.name = name
if config.plugins.moviebrowser.database.value == 'tmdb':
name = name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + name + self.language
self.getTMDbMovies(url, 1)
else:
name = name.replace(' ', '+').replace(':', '+').replace('_', '+')
name = name + 'FIN'
name = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', name)
name = sub('FIN', '', name)
url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + name
self.getTVDbMovies(url, 1)
    def getTMDbMovies(self, url, runlevel):
        """Search TMDb for self.name and open a selection list of matches.

        runlevel 1 falls back to a Google-translated title on a miss;
        runlevel 2 offers a TheTVDb lookup instead.
        """
        headers = {'Accept': 'application/json'}
        request = Request(url, headers=headers)
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            output = ''
        except socket.error:
            output = ''
        output = output.replace('&', '&')
        # Prefix poster paths with the CDN base; null posters get a default.
        output = sub('"poster_path":"', '"poster_path":"http://cf2.imgobject.com/t/p/w154', output)
        output = sub('"poster_path":null', '"poster_path":"http://www.themoviedb.org/images/apps/moviebase.png"', output)
        rating = re.findall('"vote_average":(.*?),', output)
        year = re.findall('"release_date":"(.*?)-', output)
        titles = re.findall('"title":"(.*?)"', output)
        poster = re.findall('"poster_path":"(.*?)"', output)
        id = re.findall('"id":(.*?),', output)
        country = re.findall('"backdrop(.*?)_path"', output)
        titel = 'TMDb Results'
        if not titles and runlevel == 1:
            text = self.name.replace(' ', '%20')
            self.transrenew = 'tmdb'
            self.translateRenewGoogle(text)
        elif not titles and runlevel == 2:
            self.session.openWithCallback(self.tvdb_return, MessageBox, _('\nNo TMDb Results - looking for %s on TheTVDb?') % self.name, MessageBox.TYPE_YESNO)
        else:
            self.session.openWithCallback(self.makeTMDbUpdate, moviesList, titel, rating, year, titles, poster, id, country)
def tvdb_return(self, answer):
if answer is True:
name = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
name = name + 'FIN'
name = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', name)
name = sub('FIN', '', name)
url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + name
self.getTVDbMovies(url, 1)
def getTVDbMovies(self, url, runlevel):
rating = []
year = []
titles = []
poster = []
id = []
country = []
agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
request = Request(url, headers=agents)
try:
output = urlopen(request).read()
except URLError:
output = ''
except HTTPError:
output = ''
except socket.error:
output = ''
output = output.replace('&', '&')
seriesid = re.findall('<seriesid>(.*?)</seriesid>', output)
for x in range(len(seriesid)):
url = 'http://www.thetvdb.com/data/series/' + seriesid[x] + '/' + config.plugins.moviebrowser.language.value + '.xml'
agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
request = Request(url, headers=agents)
try:
output = urlopen(request).read()
except URLError:
output = ''
except HTTPError:
output = ''
except socket.error:
output = ''
output = sub('<poster>', '<poster>http://www.thetvdb.com/banners/_cache/', output)
output = sub('<poster>http://www.thetvdb.com/banners/_cache/</poster>', '<poster>http://www.thetvdb.com/wiki/skins/common/images/wiki.png</poster>', output)
output = sub('<Rating></Rating>', '<Rating>0.0</Rating>', output)
output = sub('&', '&', output)
Rating = re.findall('<Rating>(.*?)</Rating>', output)
Year = re.findall('<FirstAired>([0-9]+)-', output)
Added = re.findall('<added>([0-9]+)-', output)
Titles = re.findall('<SeriesName>(.*?)</SeriesName>', output)
Poster = re.findall('<poster>(.*?)</poster>', output)
TVDbid = re.findall('<id>(.*?)</id>', output)
Country = re.findall('<Status>(.*?)</Status>', output)
try:
rating.append(Rating[0])
except IndexError:
rating('0.0')
try:
year.append(Year[0])
except IndexError:
try:
year.append(Added[0])
except IndexError:
year.append(' ')
try:
titles.append(Titles[0])
except IndexError:
titles.append(' ')
try:
poster.append(Poster[0])
except IndexError:
poster.append('http://www.thetvdb.com/wiki/skins/common/images/wiki.png')
try:
id.append(TVDbid[0])
except IndexError:
id.append('0')
try:
country.append(Country[0])
except IndexError:
country.append(' ')
titel = 'TheTVDb Results'
if not titles and runlevel == 1:
text = self.name.replace(' ', '%20')
text = text + 'FIN'
text = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', text)
text = sub('FIN', '', text)
self.transrenew = 'tvdb'
self.translateRenewGoogle(text)
elif not titles and runlevel == 2:
self.session.open(MessageBox, _('\nNo TheTVDb Results for %s.') % self.name, MessageBox.TYPE_INFO)
else:
self.session.openWithCallback(self.makeTVDbUpdate, moviesList, titel, rating, year, titles, poster, id, country)
def makeTMDbUpdate(self, id):
self.renew = True
self.firstdatabase = 'tmdb'
self.dbcount = 1
self.dbcountmax = 1
self.infolist = []
self.plotlist = []
self.backdroplist = []
self.posterlist = []
url = 'http://api.themoviedb.org/3/movie/%s?api_key=dfc629f7ff6936a269f8c5cdb194c890' % id + self.language
self.getTMDbData(url, 1, id, True)
def makeTVDbUpdate(self, id):
    """Re-query TheTVDb for one series id and rebuild the cached metadata."""
    self.renew = True
    self.firstdatabase = 'tvdb'
    self.dbcount = self.dbcountmax = 1
    # drop every cached list before the fresh lookup
    self.infolist, self.plotlist, self.backdroplist, self.posterlist = [], [], [], []
    url = 'http://www.thetvdb.com/data/series/%s/%s.xml' % (id, config.plugins.moviebrowser.language.value)
    self.getTVDbData(url, 1, id)
def translateRenewGoogle(self, text):
if config.plugins.moviebrowser.language.value == 'de':
url = 'http://translate.google.com/m?hl=en&sl=de&q=%s' % text.title()
elif config.plugins.moviebrowser.language.value == 'es':
url = 'http://translate.google.com/m?hl=en&sl=es&q=%s' % text.title()
elif config.plugins.moviebrowser.language.value == 'ru':
url = 'http://translate.google.com/m?hl=en&sl=ru&q=%s' % text.title()
else:
url = 'http://translate.google.com/m?hl=en&sl=en&q=%s' % text.title()
agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
before_trans = 'class="t0">'
request = Request(url, headers=agents)
try:
output = urlopen(request).read()
data = output[output.find(before_trans) + len(before_trans):]
movie = data.split('<')[0]
print '%s >> %s' % (text, movie)
except URLError:
movie = text
except HTTPError:
movie = text
except socket.error:
movie = text
if self.transrenew == 'imdb':
movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=10&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
self.getIMDbMovies(url, 2)
elif self.transrenew == 'tmdb':
movie = movie.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
self.getTMDbMovies(url, 2)
elif self.transrenew == 'tvdb':
movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
self.getTVDbMovies(url, 2)
def deleteMovie(self):
    """Ask the user to confirm deletion of the currently selected movie file."""
    if self.ready != True:
        return
    try:
        name = self.namelist[self.index]
    except IndexError:
        return
    self.session.openWithCallback(self.delete_return, MessageBox, _('\nDo you really want to delete %s?') % name, MessageBox.TYPE_YESNO)
def delete_return(self, answer):
if answer is True:
try:
movie = self.movielist[self.index]
if fileExists(movie):
os.remove(movie)
if search('[.]ts', movie) is not None:
eitfile = sub('[.]ts', '.eit', movie)
if fileExists(eitfile):
os.remove(eitfile)
if fileExists(movie + '.ap'):
os.remove(movie + '.ap')
if fileExists(movie + '.cuts'):
os.remove(movie + '.cuts')
if fileExists(movie + '.meta'):
os.remove(movie + '.meta')
if fileExists(movie + '.sc'):
os.remove(movie + '.sc')
if fileExists(movie + '_mp.jpg'):
os.remove(movie + '_mp.jpg')
movie = sub('\\(', '.', movie)
movie = sub('\\)', '.', movie)
data = open(self.database).read()
for line in data.split('\n'):
if search(movie, line) is not None:
data = data.replace(line + '\n', '')
f = open(self.database, 'w')
f.write(data)
f.close()
if self.index == self.maxentry - 1:
self.index -= 1
self.makeMovies(self.filter)
except IndexError:
pass
else:
self.blacklistMovie()
def blacklistMovie(self):
    """Ask the user to confirm blacklisting of the currently selected movie."""
    if self.ready != True:
        return
    try:
        name = self.namelist[self.index]
    except IndexError:
        return
    self.session.openWithCallback(self.blacklist_return, MessageBox, _('\nDo you really want to blacklist %s?') % name, MessageBox.TYPE_YESNO)
def blacklist_return(self, answer):
if answer is True:
self.ready = False
try:
movie = self.movielist[self.index]
movie = sub('\\(', '.', movie)
movie = sub('\\)', '.', movie)
if fileExists(self.blacklist):
fremove = open(self.blacklist, 'a')
else:
open(self.blacklist, 'w').close()
fremove = open(self.blacklist, 'a')
data = open(self.database).read()
for line in data.split('\n'):
if search(movie, line) is not None:
fremove.write(line + '\n')
fremove.close()
data = data.replace(line + '\n', '')
f = open(self.database, 'w')
f.write(data)
f.close()
if self.index == self.maxentry - 1:
self.index -= 1
self.makeMovies(self.filter)
except IndexError:
pass
def togglePlotFull(self):
    """Toggle the full-plot overlay for the selected entry."""
    if self.ready != True:
        return
    if self.plotfull == False:
        self.plotfull = True
        try:
            self.showPlotFull(self.index)
        except IndexError:
            pass
        return
    self.plotfull = False
    self.hidePlotFull()
def showPlotFull(self, index):
if self.xd == False:
PlotFull = loadPic(self.infoBackPNG, 525, 430, 3, 0, 0, 1)
else:
PlotFull = loadPic(self.infoBackPNG, 460, 400, 3, 0, 0, 1)
if PlotFull != None:
self['plotfullback'].instance.setPixmap(PlotFull)
self['plotfullback'].show()
try:
plot = self.plotlist[self.index]
self['plotfull'].setText(plot)
self['plotfull'].show()
self.makeEPoster()
except IndexError:
self['plotfull'].hide()
self.hideEPoster()
def hidePlotFull(self):
    """Hide the full-plot overlay, its background and the episode poster."""
    self.hideEPoster()
    self['plotfull'].hide()
    self['plotfullback'].hide()
def toggleBackdrops(self):
    """Toggle backdrop display for the current selection."""
    if self.ready != True:
        return
    if self.backdrops == True:
        self.backdrops = False
        self.hideBackdrops()
        return
    self.backdrops = True
    try:
        self.showBackdrops(self.index)
    except IndexError:
        pass
def hideBackdrops(self):
backdrop = config.plugins.moviebrowser.cachefolder.value + '/default_backdrop.png'
if fileExists(backdrop):
if self.xd == False:
Backdrop = loadPic(backdrop, 1280, 720, 3, 0, 0, 1)
else:
Backdrop = loadPic(backdrop, 1024, 576, 3, 0, 0, 1)
if Backdrop != None:
self['backdrop'].instance.setPixmap(Backdrop)
self['backdrop'].show()
def showBackdrops(self, index):
    """Show the backdrop for entry *index*: play a cached .m1v clip if enabled,
    else show the cached image, else download it first."""
    try:
        backdropurl = self.backdroplist[index]
        # strip the provider prefixes to obtain the local cache file name
        backdrop = sub('http://cf2.imgobject.com/t/p/w1280', '', backdropurl)
        backdrop = sub('http://www.thetvdb.com/banners/fanart/original', '', backdrop)
        backdrop = config.plugins.moviebrowser.cachefolder.value + backdrop

        def display():
            # scale to the skin resolution (full HD vs XD)
            if self.xd == False:
                pixmap = loadPic(backdrop, 1280, 720, 3, 0, 0, 1)
            else:
                pixmap = loadPic(backdrop, 1024, 576, 3, 0, 0, 1)
            if pixmap != None:
                self['backdrop'].instance.setPixmap(pixmap)
                self['backdrop'].show()

        if config.plugins.moviebrowser.m1v.value == 'yes':
            backdrop_m1v = backdrop.replace('.jpg', '.m1v')
            if fileExists(backdrop_m1v):
                self['backdrop'].hide()
                os.system("/usr/bin/showiframe '%s'" % backdrop_m1v)
                return
        if fileExists(backdrop):
            display()
        else:
            getPage(backdropurl).addCallback(self.getBackdrop, backdrop, index).addErrback(self.downloadError)
    except IndexError:
        self['backdrop'].hide()
def getBackdrop(self, output, backdrop, index):
    """Download callback: cache the backdrop bytes at *backdrop* and display them."""
    cache_file = open(backdrop, 'wb')
    cache_file.write(output)
    cache_file.close()
    width, height = (1280, 720) if self.xd == False else (1024, 576)
    pixmap = loadPic(backdrop, width, height, 3, 0, 0, 1)
    if pixmap != None:
        self['backdrop'].instance.setPixmap(pixmap)
        self['backdrop'].show()
def makePoster(self):
for x in range(self.posterALL):
try:
index = self.index - self.posterindex + x
if index >= self.maxentry:
index = index - self.maxentry
elif index < 0:
index = self.maxentry + index
posterurl = self.posterlist[index]
posterurl = sub('<episode>.*?<episode>', '', posterurl)
poster = sub('http://cf2.imgobject.com/t/p/w154', '', posterurl)
poster = sub('http://www.thetvdb.com/banners/_cache/posters', '', poster)
poster = config.plugins.moviebrowser.cachefolder.value + poster
if fileExists(poster):
if self.xd == False:
if x == 6:
Poster = loadPic(poster, 150, 225, 3, 0, 0, 1)
else:
Poster = loadPic(poster, 100, 150, 3, 0, 0, 1)
elif x == 5:
Poster = loadPic(poster, 138, 207, 3, 0, 0, 1)
else:
Poster = loadPic(poster, 92, 138, 3, 0, 0, 1)
if Poster != None:
self['poster' + str(x)].instance.setPixmap(Poster)
self['poster' + str(x)].show()
else:
getPage(posterurl).addCallback(self.getPoster, x, poster).addErrback(self.downloadError)
except IndexError:
self['poster' + str(x)].hide()
def getPoster(self, output, x, poster):
    """Download callback: cache a poster and draw it into strip slot *x*."""
    cache_file = open(poster, 'wb')
    cache_file.write(output)
    cache_file.close()
    # the highlighted slot (6 on full HD, 5 on XD) is drawn larger
    if self.xd == False:
        width, height = (150, 225) if x == 6 else (100, 150)
    else:
        width, height = (138, 207) if x == 5 else (92, 138)
    pixmap = loadPic(poster, width, height, 3, 0, 0, 1)
    if pixmap != None:
        self['poster' + str(x)].instance.setPixmap(pixmap)
        self['poster' + str(x)].show()
def makeEPoster(self):
try:
posterurl = self.posterlist[self.index]
if search('<episode>', posterurl) is not None:
eposterurl = search('<episode>(.*?)<episode>', posterurl)
eposterurl = eposterurl.group(1)
eposter = sub('.*?[/]', '', eposterurl)
eposter = config.plugins.moviebrowser.cachefolder.value + '/' + eposter
if fileExists(eposter):
if self.xd == False:
ePoster = loadPic(eposter, 500, 375, 3, 0, 0, 0)
else:
ePoster = loadPic(eposter, 440, 330, 3, 0, 0, 0)
if ePoster != None:
self['name'].hide()
self['genres'].hide()
self['eposter'].instance.setPixmap(ePoster)
self['eposter'].show()
else:
getPage(eposterurl).addCallback(self.getEPoster, eposter).addErrback(self.downloadError)
else:
self['eposter'].hide()
except IndexError:
pass
def getEPoster(self, output, eposter):
    """Download callback: cache the episode poster and display it over name/genres."""
    cache_file = open(eposter, 'wb')
    cache_file.write(output)
    cache_file.close()
    width, height = (500, 375) if self.xd == False else (440, 330)
    pixmap = loadPic(eposter, width, height, 3, 0, 0, 0)
    if pixmap != None:
        self['name'].hide()
        self['genres'].hide()
        self['eposter'].instance.setPixmap(pixmap)
        self['eposter'].show()
def hideEPoster(self):
    """Hide the episode poster and restore the name/genres labels it covered."""
    self['eposter'].hide()
    self['name'].show()
    self['genres'].show()
def makeName(self, count):
    # Show the title of entry *count*, truncated at a word boundary with '...'
    # appended when it exceeds the widget width ('FIN' is a temporary sentinel
    # consumed by the regex that cuts the last partial word).
    # NOTE(review): indentation reconstructed — the elif is assumed to pair with
    # the xd check (66-char limit on XD, 63 otherwise); confirm against the
    # original source.
    try:
        name = self.namelist[count]
        if self.xd == True:
            if len(name) > 66:
                if name[65:66] == ' ':
                    name = name[0:65]
                else:
                    name = name[0:66] + 'FIN'
                    name = sub(' \\S+FIN', '', name)
                name = name + '...'
        elif len(name) > 63:
            if name[62:63] == ' ':
                name = name[0:62]
            else:
                name = name[0:63] + 'FIN'
                name = sub(' \\S+FIN', '', name)
            name = name + '...'
        self['name'].setText(name)
        self['name'].show()
    except IndexError:
        self['name'].hide()
def makeInfo(self, count):
try:
runtime = self.infolist[count][0]
self['runtime'].setText(runtime)
self['runtime'].show()
except IndexError:
self['runtime'].hide()
try:
ratings = self.infolist[count][1]
try:
rating = int(10 * round(float(ratings), 1))
except ValueError:
ratings = '0.0'
rating = int(10 * round(float(ratings), 1))
self['ratings'].setValue(rating)
self['ratings'].show()
self['ratingsback'].show()
self['ratingtext'].setText(ratings)
except IndexError:
self['ratings'].hide()
try:
director = self.infolist[count][2]
self['director'].setText(director)
self['director'].show()
except IndexError:
self['director'].hide()
try:
actors = self.infolist[count][3]
self['actors'].setText(actors)
self['actors'].show()
except IndexError:
self['actors'].hide()
try:
genres = self.infolist[count][4]
self['genres'].setText(genres)
self['genres'].show()
except IndexError:
self['genres'].hide()
try:
year = self.infolist[count][5]
self['year'].setText(year)
self['year'].show()
except IndexError:
self['year'].hide()
try:
country = self.infolist[count][6]
self['country'].setText(country)
self['country'].show()
except IndexError:
self['country'].hide()
def rightDown(self):
if self.ready == True:
self.index += 1
if self.index == self.maxentry:
self.index = 0
try:
self.makePoster()
if self.backdrops == True:
self.showBackdrops(self.index)
self.makeName(self.index)
self.makeInfo(self.index)
if self.plotfull == True:
self.showPlotFull(self.index)
except IndexError:
pass
def down(self):
    """Move the selection one poster row forward (wrapping) and refresh."""
    if self.ready != True:
        return
    advanced = self.index + self.posterALL
    self.index = advanced - self.maxentry if advanced >= self.maxentry else advanced
    try:
        self.makePoster()
        if self.backdrops == True:
            self.showBackdrops(self.index)
        self.makeName(self.index)
        self.makeInfo(self.index)
        if self.plotfull == True:
            self.showPlotFull(self.index)
    except IndexError:
        pass
def leftUp(self):
    """Select the previous entry (wrapping to the last) and refresh the view."""
    if self.ready != True:
        return
    self.index -= 1
    if self.index < 0:
        self.index = self.maxentry - 1
    try:
        self.makePoster()
        if self.backdrops == True:
            self.showBackdrops(self.index)
        self.makeName(self.index)
        self.makeInfo(self.index)
        if self.plotfull == True:
            self.showPlotFull(self.index)
    except IndexError:
        pass
def up(self):
    """Move the selection one poster row back (wrapping) and refresh."""
    if self.ready != True:
        return
    self.index -= self.posterALL
    if self.index < 0:
        self.index = self.maxentry + self.index
    try:
        self.makePoster()
        if self.backdrops == True:
            self.showBackdrops(self.index)
        self.makeName(self.index)
        self.makeInfo(self.index)
        if self.plotfull == True:
            self.showPlotFull(self.index)
    except IndexError:
        pass
def gotoEnd(self):
    """Jump to the last entry and refresh the view."""
    if self.ready != True:
        return
    self.index = self.maxentry - 1
    try:
        self.makePoster()
        if self.backdrops == True:
            self.showBackdrops(self.index)
        self.makeName(self.index)
        self.makeInfo(self.index)
        if self.plotfull == True:
            self.showPlotFull(self.index)
    except IndexError:
        pass
def showMovies(self):
    """Open the jump list containing every title matching content and filter."""
    if self.ready != True:
        return
    collected = []
    if fileExists(self.database):
        with open(self.database, 'r') as f:
            for line in f:
                if self.content in line and self.filter in line:
                    fields = line.split(':::')
                    try:
                        title = fields[0]
                    except IndexError:
                        title = ' '
                    if title != ' ':
                        collected.append(title)
    self.movies = collected
    self.session.openWithCallback(self.gotoMovie, allMovieList, self.movies, self.index, self.content)
def gotoMovie(self, index):
    """Jump-list callback: select entry *index* and refresh the view."""
    if self.ready != True:
        return
    self.index = index
    try:
        self.makePoster()
        if self.backdrops == True:
            self.showBackdrops(self.index)
        self.makeName(self.index)
        self.makeInfo(self.index)
        if self.plotfull == True:
            self.showPlotFull(self.index)
    except IndexError:
        pass
def filterGenre(self):
if self.ready == True:
genres = ''
if fileExists(self.database):
f = open(self.database, 'r')
for line in f:
if self.content in line:
movieline = line.split(':::')
try:
genre = movieline[7]
except IndexError:
genre = ' '
if genre != ' ':
genres = genres + genre + ', '
self.genres = [ i for i in genres.split(', ') ]
self.genres.sort()
self.genres.pop(0)
try:
last = self.genres[-1]
for i in range(len(self.genres) - 2, -1, -1):
if last == self.genres[i]:
del self.genres[i]
else:
last = self.genres[i]
except IndexError:
pass
self.index = 0
if self.xd == True:
self.posterindex = 5
else:
self.posterindex = 6
self.session.openWithCallback(self.makeMovies, filterList, self.genres, 'Genre Filter')
def filterActor(self):
if self.ready == True:
actors = ''
if fileExists(self.database):
f = open(self.database, 'r')
for line in f:
if self.content in line:
movieline = line.split(':::')
try:
actor = movieline[6]
except IndexError:
actor = ' '
if actor != ' ':
actors = actors + actor + ', '
self.actors = [ i for i in actors.split(', ') ]
self.actors.sort()
self.actors.pop(0)
try:
last = self.actors[-1]
for i in range(len(self.actors) - 2, -1, -1):
if last == self.actors[i]:
del self.actors[i]
else:
last = self.actors[i]
except IndexError:
pass
self.index = 0
if self.xd == True:
self.posterindex = 5
else:
self.posterindex = 6
self.session.openWithCallback(self.makeMovies, filterList, self.actors, 'Actor Filter')
def filterDirector(self):
if self.ready == True:
directors = ''
if fileExists(self.database):
f = open(self.database, 'r')
for line in f:
if self.content in line:
movieline = line.split(':::')
try:
director = movieline[5]
except IndexError:
director = ' '
if director != ' ':
directors = directors + director + ', '
self.directors = [ i for i in directors.split(', ') ]
self.directors.sort()
self.directors.pop(0)
try:
last = self.directors[-1]
for i in range(len(self.directors) - 2, -1, -1):
if last == self.directors[i]:
del self.directors[i]
else:
last = self.directors[i]
except IndexError:
pass
self.index = 0
if self.xd == True:
self.posterindex = 5
else:
self.posterindex = 6
self.session.openWithCallback(self.makeMovies, filterList, self.directors, 'Director Filter')
def filterSeasons(self):
if self.ready == True:
self.content = ':::Series:::'
seasons = ''
if fileExists(self.database):
f = open(self.database, 'r')
for line in f:
if self.content in line:
movieline = line.split(':::')
try:
season = movieline[0]
season = season + 'FIN'
season = sub('[(]S', 'Season ', season)
season = sub('[(]s', 'season ', season)
season = sub('[Ee][0-9]+[)].*?FIN', '', season)
season = sub('FIN', '', season)
except IndexError:
season = ' '
if season != ' ':
seasons = seasons + season + ', '
self.seasons = [ i for i in seasons.split(', ') ]
self.seasons.sort()
self.seasons.pop(0)
try:
last = self.seasons[-1]
for i in range(len(self.seasons) - 2, -1, -1):
if last == self.seasons[i]:
del self.seasons[i]
else:
last = self.seasons[i]
except IndexError:
pass
self.index = 0
if self.xd == True:
self.posterindex = 5
else:
self.posterindex = 6
self.session.openWithCallback(self.makeMovies, filterSeasonList, self.seasons)
def sortDatabase(self):
    """Sort the database file in place according to the configured sort order.

    Orders: name / rating / year / date / folder, each with a '_reverse'
    variant. Name sorting ignores common German/English articles. An unknown
    order leaves the line order unchanged (file is still rewritten).

    NOTE(review): rating and year compare as strings, as before — values of
    differing width sort lexicographically; confirm before changing.
    """
    self.sortorder = config.plugins.moviebrowser.sortorder.value
    with open(self.database, 'r') as f:
        lines = f.readlines()

    def title_key(line):
        # strip leading articles so 'The X' sorts under 'X'
        return line.split(':::')[0].replace('Der ', '').replace('Die ', '').replace('Das ', '').replace('The ', '').lower()

    # ':::'-separated field index used for each non-name order
    field_index = {'rating': 4, 'year': 8, 'date': 2, 'folder': 1}
    base = self.sortorder.replace('_reverse', '')
    reverse = self.sortorder.endswith('_reverse')
    if base == 'name':
        lines.sort(key=title_key, reverse=reverse)
    elif base in field_index:
        idx = field_index[base]
        lines.sort(key=lambda line: line.split(':::')[idx], reverse=reverse)
    # write to a temp file first, then atomically replace the database
    with open(self.database + '.sorted', 'w') as fsorted:
        fsorted.writelines(lines)
    os.rename(self.database + '.sorted', self.database)
def switchView(self):
    """Switch to the posterwall view, keeping selection, content and filter."""
    if self.ready == True:
        self.session.openWithCallback(self.exit, movieBrowserPosterwall, self.index, self.content, self.filter)
def toogleContent(self):
    """Switch between movie and series content (name keeps its historical typo
    for compatibility with existing key bindings)."""
    if self.ready != True:
        return
    if self.content == ':::Movie:::' or self.content == ':::':
        target = ':::Series:::'
    else:
        target = ':::Movie:::'
    self.content = target
    self.filter = target
    self.index = 0
    self.posterindex = 5 if self.xd == True else 6
    self.makeMovies(self.filter)
def editDatabase(self):
    """Open the database editor; the movie list is rebuilt when it closes."""
    if self.ready == True:
        self.session.openWithCallback(self.makeMovies, movieDatabase)
def wikipedia(self):
    """Look up the selected title in the external Wikipedia plugin, if installed."""
    if self.ready != True:
        return
    if not fileExists('/usr/lib/enigma2/python/Plugins/Extensions/Wikipedia/plugin.pyo'):
        self.session.open(MessageBox, _('\nThe Wikipedia plugin could not be found.\n\nPlease download and install the plugin from:\nwww.kashmir-plugins.de'), MessageBox.TYPE_INFO)
        return
    # pass title, director and actors to the search screen
    self.session.open(searchWikipedia, self.namelist[self.index], self.infolist[self.index][2], self.infolist[self.index][3])
def showPath(self):
    """Show the file-system path of the selected movie in a message box."""
    if self.ready == True:
        self.session.open(MessageBox, _('\nMovie File:\n%s') % self.movielist[self.index], MessageBox.TYPE_INFO)
def getIndex(self, list):
    # Return the selected row of a list widget.
    # NOTE(review): the parameter shadows the builtin 'list'; kept as-is for
    # backward compatibility with existing callers.
    return list.getSelectedIndex()
def download(self, link, name):
    """Fetch *link* asynchronously and pass the response body to callback *name*."""
    getPage(link).addCallback(name).addErrback(self.downloadError)
def downloadError(self, output):
    # Errback for getPage downloads: failures are deliberately ignored,
    # since missing artwork is non-fatal for the browser.
    pass
def config(self):
    """Open the plugin's configuration screen; this screen exits when it closes."""
    if self.ready == True:
        self.session.openWithCallback(self.exit, movieBrowserConfig)
def zap(self):
    """Open the channel selection dialog so the user can zap while browsing."""
    if self.ready == True:
        servicelist = self.session.instantiateDialog(ChannelSelection)
        self.session.execDialog(servicelist)
def hideScreen(self):
if self.hideflag == True:
self.hideflag = False
count = 40
if config.plugins.moviebrowser.m1v.value == 'yes':
while count > 0:
count -= 1
f = open('/proc/stb/video/alpha', 'w')
f.write('%i' % (config.plugins.moviebrowser.transparency.value * count / 40))
f.close()
else:
while count > 0:
count -= 1
f = open('/proc/stb/video/alpha', 'w')
f.write('%i' % (config.av.osd_alpha.value * count / 40))
f.close()
else:
self.hideflag = True
count = 0
if config.plugins.moviebrowser.m1v.value == 'yes':
while count < 40:
count += 1
f = open('/proc/stb/video/alpha', 'w')
f.write('%i' % (config.plugins.moviebrowser.transparency.value * count / 40))
f.close()
else:
while count < 40:
count += 1
f = open('/proc/stb/video/alpha', 'w')
f.write('%i' % (config.av.osd_alpha.value * count / 40))
f.close()
def exit(self):
if config.plugins.moviebrowser.showtv.value == 'hide' or config.plugins.moviebrowser.m1v.value == 'yes':
f = open('/proc/stb/video/alpha', 'w')
f.write('%i' % config.av.osd_alpha.value)
f.close()
self.session.nav.playService(self.oldService)
if self.hideflag == False:
f = open('/proc/stb/video/alpha', 'w')
f.write('%i' % config.av.osd_alpha.value)
f.close()
self.close()
class movieBrowserPosterwall(Screen):
def __init__(self, session, index, content, filter):
    """Poster-wall browser screen.

    session: enigma session.
    index:   database entry to preselect on the wall.
    content / filter: substrings a database line must contain to be listed
        (see makeMovies) -- presumably content selects Movie/Series and
        filter narrows by director/actor/genre; confirm against callers.
    """
    # Layout metrics depend on the configured plugin size (full HD skin vs. reduced).
    if config.plugins.moviebrowser.plugin_size.value == 'full':
        self.xd = False
        self.spaceTop = 0
        self.spaceLeft = 16
        self.spaceX = 5
        self.spaceY = 5
        self.picX = 133
        self.picY = 200
        self.posterX = 9
        self.posterY = 3
        self.posterALL = 27
        self.posterREST = 0
    else:
        self.xd = True
        self.spaceTop = 0
        self.spaceLeft = 10
        self.spaceX = 5
        self.spaceY = 5
        self.picX = 106
        self.picY = 160
        self.posterX = 9
        self.posterY = 3
        self.posterALL = 27
        self.posterREST = 0
    # Pre-compute the pixel position of each of the posterALL wall cells and
    # build the matching skin XML for the poster (and poster background) widgets.
    self.positionlist = []
    skincontent = ''
    numX = -1
    for x in range(self.posterALL):
        numY = x // self.posterX
        numX += 1
        if numX >= self.posterX:
            numX = 0
        posX = self.spaceLeft + self.spaceX + numX * (self.spaceX + self.picX)
        posY = self.spaceTop + self.spaceY + numY * (self.spaceY + self.picY)
        self.positionlist.append((posX - 10, posY - 10))
        skincontent += '<widget name="poster' + str(x) + '" position="' + str(posX) + ',' + str(posY) + '" size="' + str(self.picX) + ',' + str(self.picY) + '" zPosition="10" transparent="1" alphatest="on" />'
        skincontent += '<widget name="poster_back' + str(x) + '" position="' + str(posX) + ',' + str(posY) + '" size="' + str(self.picX) + ',' + str(self.picY) + '" zPosition="11" transparent="1" alphatest="blend" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/poster_backHD.png" />'
    # SD (1024x576) and HD (1280x720) skin templates; {color} and {font}
    # placeholders are substituted below via applySkinVars.
    skin = '\n\t\t\t\t\t<screen position="center,center" size="1024,576" flags="wfNoBorder" title=" " >\n\t\t\t\t\t\t<widget name="backdrop" position="0,0" size="1024,576" alphatest="on" transparent="0" zPosition="1" />\n\t\t\t\t\t\t<widget name="infoback" position="5,500" size="1014,71" alphatest="blend" transparent="1" zPosition="2" />\n\n\t\t\t\t\t\t<widget name="ratings" position="15,524" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings.png" borderWidth="0" orientation="orHorizontal" transparent="1" zPosition="3" />\n\t\t\t\t\t\t<widget name="ratingsback" position="15,524" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings_back.png" alphatest="on" zPosition="4" />\n\t\t\t\t\t\t<widget name="ratingtext" position="235,500" size="40,71" font="Regular;24" foregroundColor="#FFFFFF" valign="center" transparent="1" zPosition="5" />\n\t\t\t\t\t\t<widget name="name" position="285,500" size="454,71" font="Regular;26" foregroundColor="#FFFFFF" halign="center" valign="center" transparent="1" zPosition="6" />\n\t\t\t\t\t\t<widget name="runtime" position="764,500" size="120,71" font="Regular;24" foregroundColor="#FFFFFF" halign="right" valign="center" transparent="1" zPosition="7" />\n\t\t\t\t\t\t<widget name="country" position="889,500" size="55,71" font="Regular;24" foregroundColor="#FFFFFF" halign="right" valign="center" transparent="1" zPosition="8" />\n\t\t\t\t\t\t<widget name="year" position="949,500" size="60,71" font="Regular;24" foregroundColor="#FFFFFF" halign="right" valign="center" transparent="1" zPosition="9" />\n\n\t\t\t\t\t\t<widget name="2infoback" position="15,15" size="460,400" alphatest="blend" transparent="1" zPosition="12" />\n\t\t\t\t\t\t<widget name="2name" position="25,16" size="440,55" font="Regular;24" foregroundColor="#FFFFFF" valign="center" transparent="1" zPosition="13" />\n\t\t\t\t\t\t<widget name="2Rating" position="25,70" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="14" />\n\t\t\t\t\t\t<widget name="2ratings" position="25,100" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings.png" borderWidth="0" orientation="orHorizontal" transparent="1" zPosition="15" />\n\t\t\t\t\t\t<widget name="2ratingsback" position="25,100" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings_back.png" alphatest="on" zPosition="16" />\n\t\t\t\t\t\t<widget name="2ratingtext" position="245,100" size="40,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="17" />\n\t\t\t\t\t\t<widget name="2Director" position="25,140" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="18" />\n\t\t\t\t\t\t<widget name="2director" position="25,170" size="285,50" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="19" />\n\t\t\t\t\t\t<widget name="2Country" position="320,140" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="20" />\n\t\t\t\t\t\t<widget name="2country" position="320,170" size="125,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="21" />\n\t\t\t\t\t\t<widget name="2Actors" position="25,210" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="22" />\n\t\t\t\t\t\t<widget name="2actors" position="25,240" size="285,95" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="23" />\n\t\t\t\t\t\t<widget name="2Year" position="320,210" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="24" />\n\t\t\t\t\t\t<widget name="2year" position="320,240" size="125,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="25" />\n\t\t\t\t\t\t<widget name="2Runtime" position="320,280" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="26" />\n\t\t\t\t\t\t<widget name="2runtime" position="320,310" size="125,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="27" />\n\t\t\t\t\t\t<widget name="2Genres" position="25,350" size="125,25" font="Regular;20" halign="left" foregroundColor="{color}" transparent="1" zPosition="28" />\n\t\t\t\t\t\t<widget name="2genres" position="25,380" size="440,25" font="Regular;20" foregroundColor="#FFFFFF" transparent="1" zPosition="29" />\n\n\t\t\t\t\t\t<widget name="plotfullback" position="549,15" size="460,400" alphatest="blend" transparent="1" zPosition="30" />\n\t\t\t\t\t\t<widget name="plotfull" position="559,22" size="440,390" font="{font}" foregroundColor="#FFFFFF" transparent="1" zPosition="31" />\n\t\t\t\t\t\t<widget name="eposter" position="25,50" size="440,330" alphatest="on" transparent="1" zPosition="32" />\n\n\t\t\t\t\t\t<widget name="frame" position="5,-5" size="126,180" zPosition="12" alphatest="on" />"\n\t\t\t\t\t\t' + skincontent + '\n\t\t\t\t\t</screen>'
    skinHD = '\n\t\t\t\t\t<screen position="center,center" size="1280,720" flags="wfNoBorder" title=" " >\n\t\t\t\t\t\t<widget name="backdrop" position="0,0" size="1280,720" alphatest="on" transparent="0" zPosition="1" />\n\t\t\t\t\t\t<widget name="infoback" position="5,620" size="1270,95" alphatest="blend" transparent="1" zPosition="2" />\n\n\t\t\t\t\t\t<widget name="ratings" position="25,657" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings.png" borderWidth="0" orientation="orHorizontal" transparent="1" zPosition="3" />\n\t\t\t\t\t\t<widget name="ratingsback" position="25,657" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings_back.png" alphatest="on" zPosition="4" />\n\t\t\t\t\t\t<widget name="ratingtext" position="245,620" size="40,95" font="Regular;26" foregroundColor="#FFFFFF" valign="center" transparent="1" zPosition="5" />\n\t\t\t\t\t\t<widget name="name" position="295,620" size="690,95" font="Regular;28" foregroundColor="#FFFFFF" valign="center" halign="center" transparent="1" zPosition="6" />\n\t\t\t\t\t\t<widget name="runtime" position="1000,620" size="120,95" font="Regular;26" foregroundColor="#FFFFFF" halign="right" valign="center" transparent="1" zPosition="7" />\n\t\t\t\t\t\t<widget name="country" position="1125,620" size="60,95" font="Regular;26" foregroundColor="#FFFFFF" halign="right" valign="center" transparent="1" zPosition="8" />\n\t\t\t\t\t\t<widget name="year" position="1190,620" size="65,95" font="Regular;26" foregroundColor="#FFFFFF" halign="right" valign="center" transparent="1" zPosition="9" />\n\n\t\t\t\t\t\t<widget name="2infoback" position="25,25" size="525,430" alphatest="blend" transparent="1" zPosition="12" />\n\t\t\t\t\t\t<widget name="2name" position="40,30" size="495,70" font="Regular;28" foregroundColor="#FFFFFF" valign="center" transparent="1" zPosition="13" />\n\t\t\t\t\t\t<widget name="2Rating" position="40,100" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="14" />\n\t\t\t\t\t\t<widget name="2ratings" position="40,130" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings.png" borderWidth="0" orientation="orHorizontal" transparent="1" zPosition="15" />\n\t\t\t\t\t\t<widget name="2ratingsback" position="40,130" size="210,21" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings_back.png" alphatest="on" zPosition="16" />\n\t\t\t\t\t\t<widget name="2ratingtext" position="260,130" size="50,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="17" />\n\t\t\t\t\t\t<widget name="2Director" position="40,170" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="18" />\n\t\t\t\t\t\t<widget name="2director" position="40,200" size="320,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="19" />\n\t\t\t\t\t\t<widget name="2Country" position="370,170" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="20" />\n\t\t\t\t\t\t<widget name="2country" position="370,200" size="125,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="21" />\n\t\t\t\t\t\t<widget name="2Actors" position="40,240" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="22" />\n\t\t\t\t\t\t<widget name="2actors" position="40,270" size="320,102" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="23" />\n\t\t\t\t\t\t<widget name="2Year" position="370,240" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="24" />\n\t\t\t\t\t\t<widget name="2year" position="370,270" size="125,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="25" />\n\t\t\t\t\t\t<widget name="2Runtime" position="370,310" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="26" />\n\t\t\t\t\t\t<widget name="2runtime" position="370,340" size="125,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="27" />\n\t\t\t\t\t\t<widget name="2Genres" position="40,380" size="125,28" font="Regular;22" halign="left" foregroundColor="{color}" transparent="1" zPosition="28" />\n\t\t\t\t\t\t<widget name="2genres" position="40,410" size="500,28" font="Regular;22" foregroundColor="#FFFFFF" transparent="1" zPosition="29" />\n\n\t\t\t\t\t\t<widget name="plotfullback" position="730,25" size="525,430" alphatest="blend" transparent="1" zPosition="30" />\n\t\t\t\t\t\t<widget name="plotfull" position="745,40" size="495,393" font="{font}" foregroundColor="#FFFFFF" transparent="1" zPosition="31" />\n\t\t\t\t\t\t<widget name="eposter" position="37,53" size="500,375" alphatest="on" transparent="1" zPosition="32" />\n\n\t\t\t\t\t\t<widget name="frame" position="11,-5" size="153,220" zPosition="12" alphatest="on" />"\n\t\t\t\t\t\t' + skincontent + '\n\t\t\t\t\t</screen>'
    # Pick the template matching the size mode and substitute accent color
    # and plot font.  NOTE: xd == False selects the HD template.
    if self.xd == False:
        color = config.plugins.moviebrowser.color.value
        if config.plugins.moviebrowser.plotfont.value == 'normal':
            font = 'Regular;22'
        else:
            font = 'Regular;20'
        self.dict = {'color': color,
         'font': font}
        self.skin = applySkinVars(skinHD, self.dict)
    else:
        color = config.plugins.moviebrowser.color.value
        if config.plugins.moviebrowser.plotfont.value == 'normal':
            font = 'Regular;20'
        else:
            font = 'Regular;18'
        self.dict = {'color': color,
         'font': font}
        self.skin = applySkinVars(skin, self.dict)
    Screen.__init__(self, session)
    # Remember the running service so exit() can restore it.
    self.oldService = self.session.nav.getCurrentlyPlayingServiceReference()
    self.hideflag = True
    self.ready = False
    self.renew = False
    self.update = False
    self.infofull = False
    self.plotfull = False
    self.tmdbposter = False
    self.index = index
    # Position within the current wall page and the page number itself.
    self.wallindex = self.index % self.posterALL
    self.pagecount = self.index // self.posterALL + 1
    self.oldindex = 0
    self.pagemax = 1
    self.content = content
    self.filter = filter
    # Query-string language suffix for the metadata services.
    if config.plugins.moviebrowser.language.value == 'de':
        self.language = '&language=de'
    elif config.plugins.moviebrowser.language.value == 'es':
        self.language = '&language=es'
    elif config.plugins.moviebrowser.language.value == 'ru':
        self.language = '&language=ru'
    else:
        self.language = '&language=en'
    # Which metadata service is queried first when building the database.
    if config.plugins.moviebrowser.database.value == 'tmdb':
        self.firstdatabase = 'tmdb'
    elif config.plugins.moviebrowser.database.value == 'imdb':
        self.firstdatabase = 'imdb'
    else:
        self.firstdatabase = 'tvdb'
    if config.plugins.moviebrowser.plotfull.value == 'show':
        self.showplotfull = True
    else:
        self.showplotfull = False
    # Parallel per-entry lists, all filled by makeMovies().
    self.namelist = []
    self.movielist = []
    self.datelist = []
    self.infolist = []
    self.plotlist = []
    self.posterlist = []
    self.backdroplist = []
    self.contentlist = []
    self['name'] = Label()
    self['runtime'] = Label()
    self['country'] = Label()
    self['year'] = Label()
    self['ratingtext'] = Label()
    self['ratings'] = ProgressBar()
    self['ratings'].hide()
    self['ratingsback'] = Pixmap()
    self['ratingsback'].hide()
    self['infoback'] = Pixmap()
    self['frame'] = Pixmap()
    self['backdrop'] = Pixmap()
    if config.plugins.moviebrowser.backdrops.value == 'show':
        self.backdrops = True
    else:
        self.backdrops = False
    # One pixmap pair per wall cell, matching the skin XML built above.
    for x in range(self.posterALL):
        self['poster' + str(x)] = Pixmap()
        self['poster_back' + str(x)] = Pixmap()
    # Widgets of the overlay info panel (prefix '2') and the full-plot panel.
    self['2name'] = Label()
    self['2Director'] = Label()
    self['2director'] = Label()
    self['2Actors'] = Label()
    self['2actors'] = Label()
    self['2Year'] = Label()
    self['2year'] = Label()
    self['2Runtime'] = Label()
    self['2runtime'] = Label()
    self['2Country'] = Label()
    self['2country'] = Label()
    self['2Genres'] = Label()
    self['2genres'] = Label()
    self['2Rating'] = Label()
    self['2ratingtext'] = Label()
    self['2ratings'] = ProgressBar()
    self['2ratings'].hide()
    self['2ratingsback'] = Pixmap()
    self['2ratingsback'].hide()
    self['2infoback'] = Pixmap()
    self['2infoback'].hide()
    self['plotfull'] = Label()
    self['plotfull'].hide()
    self['plotfullback'] = Pixmap()
    self['plotfullback'].hide()
    self['eposter'] = Pixmap()
    self['eposter'].hide()
    self['actions'] = ActionMap(['OkCancelActions',
     'DirectionActions',
     'ColorActions',
     'ChannelSelectBaseActions',
     'HelpActions',
     'InfobarMovieListActions',
     'InfobarTeletextActions',
     'MovieSelectionActions',
     'MoviePlayerActions',
     'NumberActions'], {'ok': self.ok,
     'cancel': self.exit,
     'right': self.rightDown,
     'left': self.leftUp,
     'down': self.down,
     'up': self.up,
     'nextBouquet': self.zap,
     'prevBouquet': self.zap,
     'red': self.deleteMovie,
     'yellow': self.renewIMDb,
     'green': self.renewTMDb,
     #'blue': self.hideScreen,
     'contextMenu': self.config,
     'showEventInfo': self.toggleInfoFull,
     'startTeletext': self.editDatabase,
     'leavePlayer': self.toggleBackdrops,
     'movieList': self.updateDatabase,
     '1': self.showMovies,
     '2': self.switchView,
     '3': self.showPath,
     '4': self.filterSeasons,
     '5': self.toogleContent,
     #'6': self.wikipedia,
     '7': self.filterDirector,
     '8': self.filterActor,
     '9': self.filterGenre,
     '0': self.gotoEnd,
     #'displayHelp': self.infoScreen
     }, -1)
    # Make sure the database/cache directories exist before anything touches them.
    cmd = "mkdir /usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/;mkdir /usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/cache"
    os.system(cmd)
    self.updatefile = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/update'
    self.blacklist = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/blacklist'
    self.database = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/database'
    self.onLayoutFinish.append(self.onLayoutFinished)
def onLayoutFinished(self):
    """Hide live TV as configured, draw the info-bar background and then
    either build the movie list from the database or start first-run setup.
    """
    if config.plugins.moviebrowser.showtv.value == 'hide':
        self.session.nav.stopService()
    if config.plugins.moviebrowser.m1v.value == 'yes':
        self.session.nav.stopService()
        f = open('/proc/stb/video/alpha', 'w')
        f.write('%i' % config.plugins.moviebrowser.transparency.value)
        f.close()
    # NOTE: xd == False is the HD layout.
    if self.xd == False:
        self.infoBackPNG = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/info_backHD.png'
        self.infosmallBackPNG = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/info_small_backHD.png'
        InfoBack = loadPic(self.infosmallBackPNG, 1270, 95, 3, 0, 0, 1)
    else:
        self.infoBackPNG = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/info_back.png'
        self.infosmallBackPNG = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/info_small_back.png'
        InfoBack = loadPic(self.infosmallBackPNG, 1014, 71, 3, 0, 0, 1)
    if InfoBack != None:
        self['infoback'].instance.setPixmap(InfoBack)
        self['infoback'].show()
    if fileExists(self.database):
        if fileExists(self.updatefile):
            self.sortDatabase()
            os.remove(self.updatefile)
        self.reset = False
        self.makeMovieBrowserTimer = eTimer()
        # BUGFIX: the callback list needs a callable.  The old code called
        # makeMovies() immediately and appended its None return value, which
        # blew up when the timer fired 500 ms later.
        self.makeMovieBrowserTimer.callback.append(lambda: self.makeMovies(self.filter))
        self.makeMovieBrowserTimer.start(500, True)
    else:
        self.openTimer = eTimer()
        self.openTimer.callback.append(self.openInfo)
        self.openTimer.start(500, True)
def openInfo(self):
    """No database yet: ask to (re)build it, or show the first-run advice."""
    reset_marker = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset'
    if fileExists(reset_marker):
        self.session.openWithCallback(self.reset_return, MessageBox, _('\nThe Movie Browser Database will be build now.\nDepending on the number of your movies this can take several minutes.\n\nBuild Movie Browser Database now?'), MessageBox.TYPE_YESNO)
    else:
        self.session.openWithCallback(self.first_return, MessageBox, _('\nBefore the Movie Browser Database will be build for the first time,\nyou should check your Movie Folder setting and change the\nCache Folder to a hard drive disk for faster access or to an sub stick.'), MessageBox.TYPE_YESNO)
def first_return(self, answer):
    """First-run dialog callback: drop the reset marker and open setup on Yes."""
    if answer is not True:
        self.close()
        return
    open('/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset', 'w').close()
    self.session.openWithCallback(self.exit, movieBrowserConfig)
def reset_return(self, answer):
    """Rebuild-confirmation callback: start a full database build on Yes."""
    if answer is True:
        self.reset = True
        reset_marker = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset'
        if fileExists(reset_marker):
            os.remove(reset_marker)
        self.resetTimer = eTimer()
        # BUGFIX: append a callable instead of calling database_return(True)
        # immediately and appending its None return value (which raised when
        # the timer fired).
        self.resetTimer.callback.append(lambda: self.database_return(True))
        self.resetTimer.start(500, True)
    else:
        self.close()
def makeMovies(self, filter):
    """Load every database line containing self.content and *filter* into the
    per-screen lists, then paint the poster wall.

    A database record is a ':::'-separated line:
    name:::file:::date:::runtime:::rating:::director:::actors:::genres:::
    year:::country:::plot:::poster:::backdrop:::content

    Refactor note: the fourteen copy-pasted try/except IndexError field
    readers are collapsed into one tolerant accessor; the stored values
    (including defaults for short records) are unchanged.
    """
    def field(parts, idx, default=' '):
        # Tolerant access to a record column; older/short records may miss it.
        try:
            return parts[idx]
        except IndexError:
            return default

    self.namelist = []
    self.movielist = []
    self.datelist = []
    self.infolist = []
    self.plotlist = []
    self.posterlist = []
    self.backdroplist = []
    self.contentlist = []
    self.filter = filter
    if fileExists(self.database):
        f = open(self.database, 'r')
        for line in f:
            if self.content in line and filter in line:
                movieline = line.split(':::')
                self.namelist.append(field(movieline, 0))
                self.movielist.append(field(movieline, 1))
                self.datelist.append(field(movieline, 2))
                # runtime, rating, director, actors, genres, year, country
                res = [field(movieline, 3),
                 field(movieline, 4),
                 field(movieline, 5),
                 field(movieline, 6),
                 field(movieline, 7),
                 field(movieline, 8),
                 field(movieline, 9)]
                self.infolist.append(res)
                self.plotlist.append(field(movieline, 10))
                self.posterlist.append(field(movieline, 11, 'http://cf2.imgobject.com/t/p/w154' + '/default_poster.png'))
                self.backdroplist.append(field(movieline, 12, 'http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png'))
                self.contentlist.append(field(movieline, 13, 'Series'))
        f.close()
    self.maxentry = len(self.namelist)
    if self.maxentry == 0:
        self.ready = True
        # Robustness: guard getsize against a vanished database file; a stub
        # database (fewer than 10 bytes) is removed so it gets rebuilt.
        if fileExists(self.database) and os.path.getsize(self.database) < 10:
            os.remove(self.database)
    else:
        # Pagination: posterREST is the number of cells used on the last page.
        self.posterREST = self.maxentry % self.posterALL
        if self.posterREST == 0:
            self.posterREST = self.posterALL
        self.pagemax = self.maxentry // self.posterALL
        if self.maxentry % self.posterALL > 0:
            self.pagemax += 1
        self.makePoster(self.pagecount - 1)
        self.paintFrame()
        if self.backdrops == True:
            try:
                self.showBackdrops(self.index)
            except IndexError:
                pass
        else:
            self.hideBackdrops()
        try:
            self.makeName(self.index)
        except IndexError:
            pass
        try:
            self.makeInfo(self.index)
        except IndexError:
            pass
        if self.infofull == True and self.plotfull == False:
            try:
                self.showInfoFull(self.index)
            except IndexError:
                pass
        elif self.infofull == True and self.plotfull == True:
            try:
                self.showPlotFull(self.index)
            except IndexError:
                pass
        self.ready = True
def updateDatabase(self):
    """Offer a database update if the configured movie folder is reachable."""
    if self.ready != True:
        return
    folder = config.plugins.moviebrowser.moviefolder.value
    if os.path.exists(folder):
        self.session.openWithCallback(self.database_return, MessageBox, _('\nUpdate Movie Browser Database?'), MessageBox.TYPE_YESNO)
    else:
        self.session.open(MessageBox, _('\nMovie Folder %s not reachable.\nMovie Browser Database Update canceled.') % str(config.plugins.moviebrowser.moviefolder.value), MessageBox.TYPE_ERROR)
def database_return(self, answer):
    """Update-confirmation callback: prune orphaned records, scan the movie
    folder for new files and kick off the first metadata lookup.

    Does nothing when *answer* is not True.
    """
    if answer is True:
        # Marker file signals an update in progress (sorted on next start).
        open(self.updatefile, 'w').close()
        self.update = True
        self.ready = False
        self.namelist = []
        self.movielist = []
        self.datelist = []
        self.infolist = []
        self.plotlist = []
        self.posterlist = []
        self.backdroplist = []
        self.orphaned = 0
        if fileExists(self.database):
            # Collect the creation dates of every file still on disk; a record
            # whose stored date is absent from this blob is an orphan.
            allfiles = ':::'
            folder = config.plugins.moviebrowser.moviefolder.value
            for root, dirs, files in os.walk(folder, topdown=False):
                for name in files:
                    filename = os.path.join(root, name)
                    filedate = os.path.getctime(filename)
                    allfiles = allfiles + str(filedate)
            data = open(self.database).read()
            for line in data.split('\n'):
                movieline = line.split(':::')
                try:
                    moviefolder = movieline[1]
                    moviedate = movieline[2]
                except IndexError:
                    moviefolder = ''
                    moviedate = ''
                # Drop records that point into the movie folder but whose file
                # date no longer exists on disk.
                if search(config.plugins.moviebrowser.moviefolder.value, moviefolder) is not None and search(moviedate, allfiles) is None:
                    self.orphaned += 1
                    data = data.replace(line + '\n', '')
            # Keep a backup of the old database before rewriting it.
            os.rename(self.database, self.database + '-backup')
            f = open(self.database, 'w')
            f.write(data)
            f.close()
            del allfiles
            data = open(self.database).read()
        else:
            open(self.database, 'w').close()
            data = ''
        # Blacklisted titles count as already present and are never re-added.
        if fileExists(self.blacklist):
            blacklist = open(self.blacklist).read()
            alldata = data + blacklist
        else:
            alldata = data
        # Scan the movie folder for video files not yet in the database.
        folder = config.plugins.moviebrowser.moviefolder.value
        for root, dirs, files in os.walk(folder, topdown=False):
            for name in files:
                # Parentheses would be regex metacharacters in the search below.
                movie = sub('\\(', '.', name)
                movie = sub('\\)', '.', movie)
                if search(movie, alldata) is None:
                    if name.endswith('.ts') or name.endswith('.avi') or name.endswith('.divx') or name.endswith('.flv') or name.endswith('.iso') or name.endswith('.ISO') or name.endswith('.m2ts') or name.endswith('.mov') or name.endswith('.mp4') or name.endswith('.mpg') or name.endswith('.mpeg') or name.endswith('.mkv') or name.endswith('.vob'):
                        filename = os.path.join(root, name)
                        self.movielist.append(filename)
                        self.datelist.append(os.path.getctime(filename))
                        # Derive the title: recordings ('.ts') carry a
                        # 'date - channel - title' prefix; otherwise just
                        # strip the extension.
                        if name.endswith('.ts'):
                            name = sub('.*? - .*? - ', '', name)
                            name = sub('[.]ts', '', name)
                        else:
                            name = sub('[.]avi', '', name)
                            name = sub('[.]divx', '', name)
                            name = sub('[.]flv', '', name)
                            name = sub('[.]iso', '', name)
                            name = sub('[.]ISO', '', name)
                            name = sub('[.]m2ts', '', name)
                            name = sub('[.]mov', '', name)
                            name = sub('[.]mp4', '', name)
                            name = sub('[.]mpg', '', name)
                            name = sub('[.]mpeg', '', name)
                            name = sub('[.]mkv', '', name)
                            name = sub('[.]vob', '', name)
                        print name
                        self.namelist.append(name)
        self.dbcount = 1
        self.dbcountmax = len(self.movielist)
        if self.dbcountmax == 0:
            self.finished_update(False)
        else:
            # Look up the first new title; the per-title callbacks chain
            # through the rest of the list.
            self.name = self.namelist[0]
            if config.plugins.moviebrowser.database.value == 'tmdb':
                movie = self.name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
                self.firstdatabase = 'tmdb'
                url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
                self.getTMDbData(url, 1, '0', False)
            elif config.plugins.moviebrowser.database.value == 'imdb':
                movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                self.firstdatabase = 'imdb'
                url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=1&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
                self.getIMDbData(url, 1)
            else:
                # TheTVDb: strip a trailing SxxEyy episode tag from the query.
                movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                self.firstdatabase = 'tvdb'
                movie = movie + 'FIN'
                movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
                movie = sub('FIN', '', movie)
                url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
                self.getTVDbData(url, 1, '0')
def getIMDbData(self, url, runlevel):
    """Query imdbapi.org for the current title and store its metadata.

    On 'Film not found': runlevel 1 retries once with a Google-translated
    title, runlevel 2 falls through to TheTVDb.  On success the parsed
    record is appended and the poster lookup continues on TMDb.

    Fixes vs. original: the umlaut replace chain mapped '\\u00f6' twice
    (the second occurrence - a dead no-op - clearly meant capital
    '\\u00d6' -> O-umlaut); the no-op '&'->'&' replace is repaired to
    decode '&amp;'.  The six-fold actor / five-fold genre try/except
    chains are collapsed into loops with identical output.
    """
    agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
    request = Request(url, headers=agents)
    try:
        output = urlopen(request).read()
        # Decode JSON-escaped characters into UTF-8 bytes.
        output = output.replace('\\u00e4', '\xc3\xa4').replace('\\u00f6', '\xc3\xb6').replace('\\u00fc', '\xc3\xbc').replace('\\u00c4', '\xc3\x84').replace('\\u00d6', '\xc3\x96').replace('\\u00dc', '\xc3\x9c').replace('\\u00df', '\xc3\x9f').replace('\\u0026', '&').replace('\\u00e9', '\xc3\xa9').replace('\\u00e5', '\xc3\xa5').replace('\\"', '').replace('&amp;', '&')
    except URLError:
        output = ''
    except HTTPError:
        output = ''
    except socket.error:
        output = ''
    if search('"error":"Film not found"', output) is not None and runlevel == 1:
        text = self.name.replace(' ', '%20')
        self.trans = 'imdb'
        self.translateGoogle(text)
    elif search('"error":"Film not found"', output) is not None and runlevel == 2:
        # Second miss: treat it as a series and try TheTVDb (strip SxxEyy tag).
        movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
        movie = movie + 'FIN'
        movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
        movie = sub('FIN', '', movie)
        url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
        self.getTVDbData(url, 1, '0')
    else:
        name = re.findall('<title>(.*?)</title>', output)
        runtime = re.findall('<runtime><item>.*?([0-9]+ min).*?</item>', output)
        rating = re.findall('<rating>(.*?)</rating>', output)
        director = re.findall('<directors><item>(.*?)</item>', output)
        actors = re.findall('<actors>(.*?)</actors>', output)
        try:
            actor = re.findall('<item>(.*?)</item>', actors[0])
        except IndexError:
            actor = []
        genres = re.findall('<genres>(.*?)</genres>', output)
        try:
            genre = re.findall('<item>(.*?)</item>', genres[0])
        except IndexError:
            genre = []
        year = re.findall('<year>(.*?)</year>', output)
        country = re.findall('<country><item>(.*?)</item>', output)
        plotfull = re.findall('<plot>(.*?)</plot>', output)
        try:
            self.namelist[self.dbcount - 1] = name[0]
        except IndexError:
            self.namelist[self.dbcount - 1] = self.name
        res = []
        res.append(runtime[0] if runtime else ' ')
        res.append(rating[0] if rating else '0.0')
        res.append(director[0] if director else ' ')
        # Join up to six actors; a seventh is allowed only while the line is short.
        actorstr = actor[0] if actor else ' '
        for item in actor[1:6]:
            actorstr = actorstr + ', ' + item
        if len(actorstr) < 95 and len(actor) > 6:
            actorstr = actorstr + ', ' + actor[6]
        res.append(actorstr)
        # Join up to five genres.
        genrestr = genre[0] if genre else ' '
        for item in genre[1:5]:
            genrestr = genrestr + ', ' + item
        res.append(genrestr)
        res.append(year[0] if year else ' ')
        res.append(country[0].replace('Germany', 'GER') if country else ' ')
        self.infolist.append(res)
        try:
            self.plotlist.append(plotfull[0].replace('\\', ''))
        except IndexError:
            self.plotlist.append(' ')
        # Posters/backdrops always come from TMDb.
        movie = self.name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
        url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
        self.getTMDbPoster(url, 1)
def getTMDbPoster(self, url, runlevel):
    """Fetch poster and backdrop URLs for the current title from TMDb.

    On an empty result set, runlevel 1 retries once with a Google-translated
    title; otherwise the (possibly default) artwork URLs are appended and the
    database entry is written.
    """
    self.tmdbposter = True
    request = Request(url, headers={'Accept': 'application/json'})
    try:
        output = urlopen(request).read()
    except (URLError, HTTPError, socket.error):
        output = ''
    if search('"total_results":0', output) is not None and runlevel == 1:
        # Nothing found: retry once via Google Translate.
        self.trans = 'tmdbposter'
        self.translateGoogle(self.name.replace(' ', '%20'))
    else:
        backdrop = re.findall('"backdrop_path":"(.*?)"', output)
        poster = re.findall('"poster_path":"(.*?)"', output)
        if backdrop:
            self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + backdrop[0])
        else:
            self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
        if poster:
            self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + poster[0])
        else:
            self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
        self.tmdbposter = False
        self.makeDataEntry(self.dbcount - 1, True)
    def getTMDbData(self, url, runlevel, tmdbid, renew):
        """Fetch full movie metadata from TMDb and append it to the result lists.

        url -- TMDb search or movie-detail API URL to query
        runlevel -- 1 on the first attempt; 2 after a Google-translate retry
        tmdbid -- TMDb movie id as a string, or '0' to take the first search hit
        renew -- True when refreshing a single existing entry; the renew path
                 re-extracts title/artwork from the localized detail response

        On no search result: runlevel 1 retries via translateGoogle, runlevel 2
        falls back to a TheTVDb series lookup.  Otherwise fills self.namelist,
        self.backdroplist, self.posterlist, self.infolist and self.plotlist and
        finishes the entry via makeDataEntry.
        """
        headers = {'Accept': 'application/json'}
        request = Request(url, headers=headers)
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            # NOTE(review): unreachable - HTTPError subclasses URLError and is
            # already caught above; behavior is the same either way.
            output = ''
        except socket.error:
            output = ''
        if search('"total_results":0', output) is not None and runlevel == 1:
            # No hit: translate the title and retry once.
            text = self.name.replace(' ', '%20')
            self.trans = 'tmdb'
            self.translateGoogle(text)
        elif search('"total_results":0', output) is not None and runlevel == 2:
            # Still no hit after translation: assume a series, ask TheTVDb.
            # The FIN sentinel lets the regex strip a trailing "SxxEyy ..." tag.
            movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
            movie = movie + 'FIN'
            movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
            movie = sub('FIN', '', movie)
            url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
            self.getTVDbData(url, 1, '0')
        else:
            if tmdbid == '0':
                # Take the id of the first search result.
                tmdbid = re.findall('"id":(.*?),', output)
                try:
                    tmdbid = tmdbid[0]
                except IndexError:
                    tmdbid = '0'
            name = re.findall('"title":"(.*?)"', output)
            backdrop = re.findall('"backdrop_path":"(.*?)"', output)
            year = re.findall('"release_date":"(.*?)-', output)
            poster = re.findall('"poster_path":"(.*?)"', output)
            rating = re.findall('"vote_average":(.*?),', output)
            try:
                self.namelist[self.dbcount - 1] = name[0]
            except IndexError:
                self.namelist[self.dbcount - 1] = self.name
            try:
                self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + backdrop[0])
            except IndexError:
                self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
            try:
                self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + poster[0])
            except IndexError:
                self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            # Second request: localized detail record for the resolved id.
            url = 'http://api.themoviedb.org/3/movie/%s?api_key=dfc629f7ff6936a269f8c5cdb194c890' % tmdbid + self.language
            headers = {'Accept': 'application/json'}
            request = Request(url, headers=headers)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            plot = re.findall('"overview":"(.*?)","', output)
            if renew == True:
                # Refresh mode: re-extract title/artwork from localized data.
                # The collection blob is removed so its fields don't shadow
                # the movie's own title/artwork in the positional regexes.
                output = sub('"belongs_to_collection":{.*?}', '', output)
                name = re.findall('"title":"(.*?)"', output)
                backdrop = re.findall('"backdrop_path":"(.*?)"', output)
                poster = re.findall('"poster_path":"(.*?)"', output)
            # Third request: unlocalized detail record, used as a fallback for
            # fields the localized response did not carry.
            url = 'http://api.themoviedb.org/3/movie/%s?api_key=dfc629f7ff6936a269f8c5cdb194c890' % tmdbid
            headers = {'Accept': 'application/json'}
            request = Request(url, headers=headers)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            output = sub('"belongs_to_collection":{.*?}', '', output)
            if not plot:
                plot = re.findall('"overview":"(.*?)","', output)
            # Up to five genres, scraped positionally from the raw JSON text.
            genre = re.findall('"genres":[[]."id":[0-9]+,"name":"(.*?)"', output)
            genre2 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            genre3 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            genre4 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            genre5 = re.findall('"genres":[[]."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":".*?".,."id":[0-9]+,"name":"(.*?)"', output)
            country = re.findall('"iso_3166_1":"(.*?)"', output)
            runtime = re.findall('"runtime":(.*?),', output)
            if renew == True:
                # Refresh mode stores name/artwork again from the fallback data.
                year = re.findall('"release_date":"(.*?)-', output)
                rating = re.findall('"vote_average":(.*?),', output)
                if not backdrop:
                    backdrop = re.findall('"backdrop_path":"(.*?)"', output)
                if not poster:
                    poster = re.findall('"poster_path":"(.*?)"', output)
                try:
                    self.namelist[self.dbcount - 1] = name[0]
                except IndexError:
                    self.namelist[self.dbcount - 1] = self.name
                try:
                    self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + backdrop[0])
                except IndexError:
                    self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
                try:
                    self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + poster[0])
                except IndexError:
                    self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            # Fourth request: cast list for actors and director.
            url = 'http://api.themoviedb.org/3/movie/%s/casts?api_key=dfc629f7ff6936a269f8c5cdb194c890' % tmdbid + self.language
            headers = {'Accept': 'application/json'}
            request = Request(url, headers=headers)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            # First seven cast names, scraped positionally.
            actor = re.findall('"name":"(.*?)"', output)
            actor2 = re.findall('"name":".*?"name":"(.*?)"', output)
            actor3 = re.findall('"name":".*?"name":".*?"name":"(.*?)"', output)
            actor4 = re.findall('"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            actor5 = re.findall('"name":".*?"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            actor6 = re.findall('"name":".*?"name":".*?"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            actor7 = re.findall('"name":".*?"name":".*?"name":".*?"name":".*?"name":".*?"name":".*?"name":"(.*?)"', output)
            director = re.findall('"([^"]+)","department":"Directing","job":"Director"', output)
            # Assemble the info record: runtime, rating, director, actors,
            # genres, year, country (the order makeDataEntry expects).
            res = []
            try:
                res.append(runtime[0] + ' min')
            except IndexError:
                res.append(' ')
            try:
                res.append(rating[0])
            except IndexError:
                res.append('0.0')
            try:
                res.append(director[0])
            except IndexError:
                res.append(' ')
            try:
                actors = actor[0]
            except IndexError:
                actors = ' '
            try:
                actors = actors + ', ' + actor2[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor3[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor4[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor5[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor6[0]
            except IndexError:
                pass
            if len(actors) < 95:
                # Only add a seventh actor while the line still fits the skin.
                try:
                    actors = actors + ', ' + actor7[0]
                except IndexError:
                    pass
            res.append(actors)
            try:
                genres = genre[0]
            except IndexError:
                genres = ' '
            try:
                genres = genres + ', ' + genre2[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre3[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre4[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre5[0]
            except IndexError:
                pass
            res.append(genres.replace('Science Fiction', 'Sci-Fi'))
            try:
                res.append(year[0])
            except IndexError:
                res.append(' ')
            try:
                res.append(country[0].replace('US', 'USA'))
            except IndexError:
                res.append(' ')
            self.infolist.append(res)
            try:
                self.plotlist.append(plot[0].replace('\\', ''))
            except IndexError:
                self.plotlist.append(' ')
            self.makeDataEntry(self.dbcount - 1, True)
    def translateGoogle(self, text):
        """Translate *text* to English via the Google Translate mobile page,
        then re-run the pending lookup.

        self.trans selects which lookup is resumed afterwards:
        'imdb', 'tmdbposter', 'tmdb' or 'tvdb'.  On any network error the
        untranslated text is used as-is.
        """
        if config.plugins.moviebrowser.language.value == 'de':
            url = 'http://translate.google.com/m?hl=en&sl=de&q=%s' % text.title()
        elif config.plugins.moviebrowser.language.value == 'es':
            url = 'http://translate.google.com/m?hl=en&sl=es&q=%s' % text.title()
        elif config.plugins.moviebrowser.language.value == 'ru':
            url = 'http://translate.google.com/m?hl=en&sl=ru&q=%s' % text.title()
        else:
            url = 'http://translate.google.com/m?hl=en&sl=en&q=%s' % text.title()
        agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
        before_trans = 'class="t0">'
        request = Request(url, headers=agents)
        try:
            output = urlopen(request).read()
            # The translation is the text of the first element of class "t0".
            data = output[output.find(before_trans) + len(before_trans):]
            movie = data.split('<')[0]
            print '%s >> %s' % (text, movie)
        except URLError:
            movie = text
        except HTTPError:
            movie = text
        except socket.error:
            movie = text
        if self.trans == 'imdb':
            movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
            url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=1&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
            self.getIMDbData(url, 2)
        elif self.trans == 'tmdbposter':
            movie = movie.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
            url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
            self.getTMDbPoster(url, 2)
        elif self.trans == 'tmdb':
            movie = movie.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
            url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
            self.getTMDbData(url, 2, '0', False)
        elif self.trans == 'tvdb':
            movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
            url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
            self.getTVDbData(url, 2, '0')
    def getTVDbData(self, url, runlevel, seriesid):
        """Fetch series (and, if the name carries SxxEyy, episode) metadata
        from TheTVDb and append it to the result lists.

        url -- TheTVDb GetSeries.php search URL or a direct series XML URL
        runlevel -- 1 on the first attempt; 2 after a Google-translate retry
        seriesid -- TheTVDb series id as a string, or '0' to take the first hit

        On no result: runlevel 1 retries via translateGoogle, runlevel 2 stores
        a placeholder entry.  Otherwise fills self.namelist, self.infolist,
        self.plotlist, self.backdroplist and self.posterlist and finishes the
        entry via makeDataEntry.
        """
        agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
        request = Request(url, headers=agents)
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            # NOTE(review): unreachable - HTTPError subclasses URLError.
            output = ''
        except socket.error:
            output = ''
        if search('<Series>', output) is None and runlevel == 1:
            # No hit: strip a possible "SxxEyy ..." tag (FIN is a sentinel),
            # translate the title and retry once.
            text = self.name.replace(' ', '%20')
            text = text + 'FIN'
            text = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', text)
            text = sub('FIN', '', text)
            self.trans = 'tvdb'
            self.translateGoogle(text)
        elif search('<Series>', output) is None and runlevel == 2:
            # Still nothing: store a placeholder entry with default artwork.
            self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
            self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            self.namelist[self.dbcount - 1] = self.name
            res = []
            res.append(' ')
            res.append('0.0')
            res.append(' ')
            res.append(' ')
            res.append(' ')
            res.append(' ')
            res.append(' ')
            self.infolist.append(res)
            self.plotlist.append(' ')
            self.makeDataEntry(self.dbcount - 1, False)
        else:
            if seriesid == '0':
                # Take the id of the first search result.
                seriesid = re.findall('<seriesid>(.*?)</seriesid>', output)
                try:
                    seriesid = seriesid[0]
                except IndexError:
                    seriesid = '0'
            if search('[Ss][0-9]+[Ee][0-9]+', self.name) is not None:
                # The filename carries a season/episode tag: fetch the episode
                # record first (name, air year, guests, director, plot, still).
                data = search('([Ss][0-9]+[Ee][0-9]+)', self.name)
                data = data.group(1)
                season = search('[Ss]([0-9]+)[Ee]', data)
                season = season.group(1).lstrip('0')
                episode = search('[Ss][0-9]+[Ee]([0-9]+)', data)
                episode = episode.group(1).lstrip('0')
                url = 'http://www.thetvdb.com/api/D19315B88B2DE21F/series/' + seriesid + '/default/' + season + '/' + episode + '/' + config.plugins.moviebrowser.language.value + '.xml'
                agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
                request = Request(url, headers=agents)
                try:
                    output = urlopen(request).read()
                except URLError:
                    output = ''
                except HTTPError:
                    output = ''
                except socket.error:
                    output = ''
                output = sub('&', '&', output)
                episode = re.findall('<EpisodeName>(.*?)</EpisodeName>', output)
                year = re.findall('<FirstAired>([0-9]+)-', output)
                guest = re.findall('<GuestStars>[|](.*?)[|]</GuestStars>', output)
                director = re.findall('<Director>[|](.*?)[|]', output)
                if not director:
                    director = re.findall('<Director>(.*?)</Director>', output)
                plotfull = re.findall('<Overview>(.*?)</Overview>', output, re.S)
                rating = re.findall('<Rating>(.*?)</Rating>', output)
                eposter = re.findall('<filename>(.*?)</filename>', output)
            else:
                # No episode tag: series-level lookup only; episode fields
                # stay empty so the series data below fills the gaps.
                data = ''
                episode = []
                year = []
                guest = []
                director = []
                plotfull = []
                rating = []
                eposter = []
            # Series-level record: name, runtime, actors, genres, artwork.
            url = 'http://www.thetvdb.com/data/series/' + seriesid + '/' + config.plugins.moviebrowser.language.value + '.xml'
            agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
            request = Request(url, headers=agents)
            try:
                output = urlopen(request).read()
            except URLError:
                output = ''
            except HTTPError:
                output = ''
            except socket.error:
                output = ''
            output = sub('&', '&', output)
            name = re.findall('<SeriesName>(.*?)</SeriesName>', output)
            runtime = re.findall('<Runtime>(.*?)</Runtime>', output)
            if not rating:
                rating = re.findall('<Rating>(.*?)</Rating>', output)
            actors = re.findall('<Actors>(.*?)</Actors>', output)
            # First seven actors from the pipe-separated <Actors> list.
            try:
                actor = re.findall('[|](.*?)[|]', actors[0])
            except IndexError:
                actor = []
            try:
                actor2 = re.findall('[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor2 = []
            try:
                actor3 = re.findall('[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor3 = []
            try:
                actor4 = re.findall('[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor4 = []
            try:
                actor5 = re.findall('[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor5 = []
            try:
                actor6 = re.findall('[|].*?[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor6 = []
            try:
                actor7 = re.findall('[|].*?[|].*?[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', actors[0])
            except IndexError:
                actor7 = []
            genres = re.findall('<Genre>(.*?)</Genre>', output)
            # First five genres from the pipe-separated <Genre> list.
            try:
                genre = re.findall('[|](.*?)[|]', genres[0])
            except IndexError:
                genre = []
            try:
                genre2 = re.findall('[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre2 = []
            try:
                genre3 = re.findall('[|].*?[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre3 = []
            try:
                genre4 = re.findall('[|].*?[|].*?[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre4 = []
            try:
                genre5 = re.findall('[|].*?[|].*?[|].*?[|].*?[|](.*?)[|]', genres[0])
            except IndexError:
                genre5 = []
            if not year:
                year = re.findall('<FirstAired>([0-9]+)-', output)
            if not plotfull:
                plotfull = re.findall('<Overview>(.*?)</Overview>', output, re.S)
            backdrop = re.findall('<fanart>(.*?)</fanart>', output)
            poster = re.findall('<poster>(.*?)</poster>', output)
            try:
                if not episode:
                    self.namelist[self.dbcount - 1] = name[0].replace('Das n\xc3\xa4chste Jahrhundert', 'TNG')
                else:
                    # Episode entry: "<series> - (SxxEyy) <episode title>".
                    self.namelist[self.dbcount - 1] = name[0].replace('Das n\xc3\xa4chste Jahrhundert', 'TNG') + ' - (' + data + ') ' + episode[0]
            except IndexError:
                self.namelist[self.dbcount - 1] = self.name
            # Assemble the info record: runtime, rating, director, actors,
            # genres, year, country (the order makeDataEntry expects).
            res = []
            try:
                res.append(runtime[0] + ' min')
            except IndexError:
                res.append(' ')
            try:
                res.append(rating[0])
            except IndexError:
                res.append('0.0')
            try:
                if not director:
                    res.append('Various')
                else:
                    res.append(director[0])
            except IndexError:
                res.append('Various')
            try:
                actors = actor[0]
            except IndexError:
                actors = ' '
            try:
                actors = actors + ', ' + actor2[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor3[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor4[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor5[0]
            except IndexError:
                pass
            try:
                actors = actors + ', ' + actor6[0]
            except IndexError:
                pass
            if len(actors) < 95:
                # Only add a seventh actor while the line still fits the skin.
                try:
                    actors = actors + ', ' + actor7[0]
                except IndexError:
                    pass
            res.append(actors)
            try:
                genres = genre[0]
            except IndexError:
                genres = ' '
            try:
                genres = genres + ', ' + genre2[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre3[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre4[0]
            except IndexError:
                pass
            try:
                genres = genres + ', ' + genre5[0]
            except IndexError:
                pass
            try:
                res.append(genres.replace('Science-Fiction', 'Sci-Fi'))
            except IndexError:
                res.append(' ')
            try:
                res.append(year[0])
            except IndexError:
                res.append(' ')
            # TheTVDb has no country field; derive it from the UI language.
            if config.plugins.moviebrowser.language.value == 'de':
                country = 'DE'
            elif config.plugins.moviebrowser.language.value == 'es':
                country = 'ES'
            else:
                country = 'USA'
            res.append(country)
            self.infolist.append(res)
            try:
                if not guest:
                    plotfull = plotfull[0].replace('\n', '').replace('"', '"')
                else:
                    # Append guest stars to the plot text when present.
                    plotfull = plotfull[0].replace('\n', '').replace('"', '"')
                    plotfull = plotfull + ' Guest Stars: ' + guest[0].replace('|', ', ') + '.'
                self.plotlist.append(plotfull)
            except IndexError:
                self.plotlist.append(' ')
            try:
                self.backdroplist.append('http://www.thetvdb.com/banners/' + backdrop[0])
            except IndexError:
                self.backdroplist.append('http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png')
            try:
                if not eposter:
                    self.posterlist.append('http://www.thetvdb.com/banners/_cache/' + poster[0])
                else:
                    # Episode still appended after the series poster, delimited
                    # by '<episode>' markers for later splitting.
                    self.posterlist.append('http://www.thetvdb.com/banners/_cache/' + poster[0] + '<episode>' + 'http://www.thetvdb.com/banners/' + eposter[0] + '<episode>')
            except IndexError:
                self.posterlist.append('http://cf2.imgobject.com/t/p/w154' + '/default_poster.png')
            self.makeDataEntry(self.dbcount - 1, False)
    def makeDataEntry(self, count, content):
        """Persist entry number *count* to the database file and advance the run.

        count -- index into the result lists (namelist, infolist, ...)
        content -- True writes a ':::Movie:::' record, False ':::Series:::'

        In import mode (self.renew False) the ':::'-separated record is
        appended to self.database; in renew mode the line matching the current
        filename is rewritten in place.  Afterwards either the next title is
        queried on the configured first database, or the run is finished.
        """
        if self.renew == False:
            f = open(self.database, 'a')
            try:
                if content == True:
                    data = self.namelist[count] + ':::' + self.movielist[count] + ':::' + str(self.datelist[count]) + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Movie:::\n'
                else:
                    data = self.namelist[count] + ':::' + self.movielist[count] + ':::' + str(self.datelist[count]) + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Series:::\n'
                f.write(data)
            except IndexError:
                # Incomplete result lists: skip the record rather than crash.
                pass
            f.close()
        else:
            try:
                if content == True:
                    newdata = self.namelist[count] + ':::' + self.movielist[self.index] + ':::' + self.datelist[self.index] + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Movie:::'
                else:
                    newdata = self.namelist[count] + ':::' + self.movielist[self.index] + ':::' + self.datelist[self.index] + ':::' + self.infolist[count][0] + ':::' + self.infolist[count][1] + ':::' + self.infolist[count][2] + ':::' + self.infolist[count][3] + ':::' + self.infolist[count][4] + ':::' + self.infolist[count][5] + ':::' + self.infolist[count][6] + ':::' + self.plotlist[count] + ':::' + self.posterlist[count] + ':::' + self.backdroplist[count] + ':::Series:::'
            except IndexError:
                newdata = ''
            data = open(self.database).read()
            movie = self.movielist[self.index]
            # The filename is used as a regex pattern below; parentheses are
            # replaced with '.' wildcards so they don't act as groups.
            movie = sub('\\(', '.', movie)
            movie = sub('\\)', '.', movie)
            if search(movie, data) is not None:
                # Rewrite every database line matching this filename.
                for line in data.split('\n'):
                    if search(movie, line) is not None:
                        data = data.replace(line, newdata)
                f = open(self.database, 'w')
                f.write(data)
                f.close()
        if self.dbcount < self.dbcountmax:
            # More titles queued: query the next one on the first database.
            self.dbcount += 1
            self.name = self.namelist[self.dbcount - 1]
            if self.firstdatabase == 'tmdb':
                movie = self.name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
                url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
                self.getTMDbData(url, 1, '0', False)
            elif self.firstdatabase == 'imdb':
                movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=1&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
                self.getIMDbData(url, 1)
            else:
                movie = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
                movie = movie + 'FIN'
                movie = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', movie)
                movie = sub('FIN', '', movie)
                url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
                self.getTVDbData(url, 1, '0')
        elif self.update == True:
            if self.reset == True:
                self.session.openWithCallback(self.exit, movieBrowserPosterwall, self.index, config.plugins.moviebrowser.filter.value, config.plugins.moviebrowser.filter.value)
            else:
                self.finished_update(True)
        else:
            self.finished()
def finished(self):
if self.renew == False:
self.index = 0
self.oldindex = 0
self.wallindex = 0
self.pagecount = 1
self.makeMovies(self.filter)
else:
self.renew = False
self.makeMovies(self.filter)
    def finished_update(self, found):
        """Finish an update run: report results, sort, clean up, rebuild list.

        found -- True when at least one new movie was imported.

        Shows a MessageBox summarizing imported and orphaned (deleted) counts,
        removes the update marker file and rebuilds the movie list.
        """
        if found == False and self.orphaned == 0:
            self.session.open(MessageBox, _('\nNo new Movies found:\nYour Database is up to date.'), MessageBox.TYPE_INFO)
            os.remove(self.updatefile)
            self.makeMovies(self.filter)
        elif found == False:
            # Nothing imported, but orphaned entries were removed.
            if self.orphaned == 1:
                self.session.open(MessageBox, _('\nNo new Movies found.\n%s Orphaned Movie deleted from Database.') % str(self.orphaned), MessageBox.TYPE_INFO)
            else:
                self.session.open(MessageBox, _('\nNo new Movies found.\n%s Orphaned Movies deleted from Database.') % str(self.orphaned), MessageBox.TYPE_INFO)
            os.remove(self.updatefile)
            self.makeMovies(self.filter)
        elif self.orphaned == 0:
            # Imports only; singular/plural message variants.
            if self.dbcountmax == 1:
                self.session.open(MessageBox, _('\n%s Movie imported into Database.') % str(self.dbcountmax), MessageBox.TYPE_INFO)
            else:
                self.session.open(MessageBox, _('\n%s Movies imported into Database.') % str(self.dbcountmax), MessageBox.TYPE_INFO)
            if fileExists(self.updatefile):
                self.sortDatabase()
                os.remove(self.updatefile)
            self.makeMovies(self.filter)
        else:
            # Both imports and orphan deletions; singular/plural combinations.
            if self.dbcountmax == 1 and self.orphaned == 1:
                self.session.open(MessageBox, _('\n%s Movie imported into Database.\n%s Orphaned Movie deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            elif self.dbcountmax == 1:
                self.session.open(MessageBox, _('\n%s Movie imported into Database.\n%s Orphaned Movies deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            elif self.orphaned == 1:
                self.session.open(MessageBox, _('\n%s Movies imported into Database.\n%s Orphaned Movie deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            else:
                self.session.open(MessageBox, _('\n%s Movies imported into Database.\n%s Orphaned Movies deleted from Database.') % (str(self.dbcountmax), str(self.orphaned)), MessageBox.TYPE_INFO)
            if fileExists(self.updatefile):
                self.sortDatabase()
                os.remove(self.updatefile)
            self.makeMovies(self.filter)
    def ok(self):
        """Play the currently selected movie.

        Dispatch by file extension: '.ts' recordings play as DVB service type
        1, '.iso'/'.ISO' images go to the DVD Player plugin (if installed),
        everything else plays as GStreamer service type 4097.
        """
        if self.ready == True:
            try:
                filename = self.movielist[self.index]
                if filename.endswith('.ts'):
                    sref = eServiceReference('1:0:0:0:0:0:0:0:0:0:' + filename)
                    sref.setName(self.namelist[self.index])
                    self.session.open(MoviePlayer, sref)
                elif filename.endswith('.iso') or filename.endswith('.ISO'):
                    if os.path.exists('/usr/lib/enigma2/python/Plugins/Extensions/DVDPlayer/'):
                        # Imported lazily: the plugin may not be installed.
                        from Plugins.Extensions.DVDPlayer.plugin import DVDPlayer
                        self.session.open(DVDPlayer, dvd_filelist=[filename])
                    else:
                        self.session.open(MessageBox, _('DVD Player Plugin not installed.'), MessageBox.TYPE_ERROR)
                else:
                    sref = eServiceReference('4097:0:0:0:0:0:0:0:0:0:' + filename)
                    sref.setName(self.namelist[self.index])
                    self.session.open(MoviePlayer, sref)
            except IndexError:
                # Empty database / stale selection: ignore the keypress.
                pass
def renewIMDb(self):
if self.ready == True:
name = self.movielist[self.index]
name = sub('.*?[/]', '', name)
if name.endswith('.ts'):
name = sub('.*? - .*? - ', '', name)
name = sub('[.]ts', '', name)
else:
name = sub('[.]avi', '', name)
name = sub('[.]divx', '', name)
name = sub('[.]flv', '', name)
name = sub('[.]iso', '', name)
name = sub('[.]ISO', '', name)
name = sub('[.]m2ts', '', name)
name = sub('[.]mov', '', name)
name = sub('[.]mp4', '', name)
name = sub('[.]mpg', '', name)
name = sub('[.]mpeg', '', name)
name = sub('[.]mkv', '', name)
name = sub('[.]vob', '', name)
self.session.openWithCallback(self.renewIMDbReturn, VirtualKeyBoard, title='Update Single Movie Data - IMDb:', text=name)
def renewIMDbReturn(self, name):
if name and name != '':
self.name = name
name = name.replace(' ', '+').replace(':', '+').replace('_', '+')
url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=10&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % name
self.getIMDbMovies(url, 1)
    def getIMDbMovies(self, url, runlevel):
        """Search IMDb (via imdbapi.org) and show the hits in a chooser list.

        url -- imdbapi.org XML search URL
        runlevel -- 1 on the first attempt; 2 after a Google-translate retry

        On no result: runlevel 1 retries via translateRenewGoogle, runlevel 2
        offers a TheTVDb fallback dialog; otherwise opens moviesList with the
        scraped rating/year/title/poster/id/country columns.
        """
        agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
        request = Request(url, headers=agents)
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            output = ''
        except socket.error:
            output = ''
        output = output.replace('&', '&')
        # Inject a generic IMDb logo as the poster for every hit (the API
        # response carries no artwork).
        output = sub('</type><imdb_id>', '</type><poster>http://profile.ak.fbcdn.net/hprofile-ak-snc7/373026_15925638948_1021284996_q.jpg</poster><imdb_id>', output)
        rating = re.findall('<rating>(.*?)</rating>', output)
        year = re.findall('<year>(.*?)</year>', output)
        titles = re.findall('<title>(.*?)</title>', output)
        poster = re.findall('<poster>(.*?)</poster>', output)
        id = re.findall('<imdb_id>(.*?)</imdb_id>', output)
        country = re.findall('<country><item>(.*?)</item>', output)
        titel = 'IMDb Results'
        if not titles and runlevel == 1:
            text = self.name.replace(' ', '%20')
            self.transrenew = 'imdb'
            self.translateRenewGoogle(text)
        elif not titles and runlevel == 2:
            self.session.openWithCallback(self.tvdb_return, MessageBox, _('\nNo IMDb Results - looking for %s on TheTVDb?') % self.name, MessageBox.TYPE_YESNO)
        else:
            self.session.openWithCallback(self.makeIMDbUpdate, moviesList, titel, rating, year, titles, poster, id, country)
def makeIMDbUpdate(self, id):
self.renew = True
self.firstdatabase = 'imdb'
self.dbcount = 1
self.dbcountmax = 1
self.infolist = []
self.plotlist = []
self.backdroplist = []
self.posterlist = []
url = 'http://imdbapi.org/?ids=%s&type=xml&plot=full&episode=0&lang=en-US&aka=simple&release=simple&business=0&tech=0' % id
self.getIMDbData(url, 1)
def renewTMDb(self):
if self.ready == True:
name = self.movielist[self.index]
name = sub('.*?[/]', '', name)
if name.endswith('.ts'):
name = sub('.*? - .*? - ', '', name)
name = sub('[.]ts', '', name)
else:
name = sub('[.]avi', '', name)
name = sub('[.]divx', '', name)
name = sub('[.]flv', '', name)
name = sub('[.]iso', '', name)
name = sub('[.]ISO', '', name)
name = sub('[.]m2ts', '', name)
name = sub('[.]mov', '', name)
name = sub('[.]mp4', '', name)
name = sub('[.]mpg', '', name)
name = sub('[.]mpeg', '', name)
name = sub('[.]mkv', '', name)
name = sub('[.]vob', '', name)
if config.plugins.moviebrowser.database.value == 'tvdb':
self.session.openWithCallback(self.renewTMDbReturn, VirtualKeyBoard, title='Update Single Series Data - TheTVDb:', text=name)
else:
self.session.openWithCallback(self.renewTMDbReturn, VirtualKeyBoard, title='Update Single Movie Data - TMDb:', text=name)
def renewTMDbReturn(self, name):
if name and name != '':
self.name = name
if config.plugins.moviebrowser.database.value == 'tmdb':
name = name.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + name + self.language
self.getTMDbMovies(url, 1)
else:
name = name.replace(' ', '+').replace(':', '+').replace('_', '+')
name = name + 'FIN'
name = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', name)
name = sub('FIN', '', name)
url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + name
self.getTVDbMovies(url, 1)
    def getTMDbMovies(self, url, runlevel):
        """Search TMDb and show the hits in a chooser list.

        url -- TMDb search API URL
        runlevel -- 1 on the first attempt; 2 after a Google-translate retry

        On no result: runlevel 1 retries via translateRenewGoogle, runlevel 2
        offers a TheTVDb fallback dialog; otherwise opens moviesList with the
        scraped rating/year/title/poster/id columns.
        """
        headers = {'Accept': 'application/json'}
        request = Request(url, headers=headers)
        try:
            output = urlopen(request).read()
        except URLError:
            output = ''
        except HTTPError:
            output = ''
        except socket.error:
            output = ''
        output = output.replace('&', '&')
        # Prefix relative poster paths and substitute a placeholder image
        # when TMDb has no poster at all.
        output = sub('"poster_path":"', '"poster_path":"http://cf2.imgobject.com/t/p/w154', output)
        output = sub('"poster_path":null', '"poster_path":"http://www.themoviedb.org/images/apps/moviebase.png"', output)
        rating = re.findall('"vote_average":(.*?),', output)
        year = re.findall('"release_date":"(.*?)-', output)
        titles = re.findall('"title":"(.*?)"', output)
        poster = re.findall('"poster_path":"(.*?)"', output)
        id = re.findall('"id":(.*?),', output)
        country = re.findall('"backdrop(.*?)_path"', output)
        titel = 'TMDb Results'
        if not titles and runlevel == 1:
            text = self.name.replace(' ', '%20')
            self.transrenew = 'tmdb'
            self.translateRenewGoogle(text)
        elif not titles and runlevel == 2:
            self.session.openWithCallback(self.tvdb_return, MessageBox, _('\nNo TMDb Results - looking for %s on TheTVDb?') % self.name, MessageBox.TYPE_YESNO)
        else:
            self.session.openWithCallback(self.makeTMDbUpdate, moviesList, titel, rating, year, titles, poster, id, country)
def tvdb_return(self, answer):
if answer is True:
name = self.name.replace(' ', '+').replace(':', '+').replace('_', '+')
name = name + 'FIN'
name = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', name)
name = sub('FIN', '', name)
url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + name
self.getTVDbMovies(url, 1)
def getTVDbMovies(self, url, runlevel):
rating = []
year = []
titles = []
poster = []
id = []
country = []
agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
request = Request(url, headers=agents)
try:
output = urlopen(request).read()
except URLError:
output = ''
except HTTPError:
output = ''
except socket.error:
output = ''
output = output.replace('&', '&')
seriesid = re.findall('<seriesid>(.*?)</seriesid>', output)
for x in range(len(seriesid)):
url = 'http://www.thetvdb.com/data/series/' + seriesid[x] + '/' + config.plugins.moviebrowser.language.value + '.xml'
agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
request = Request(url, headers=agents)
try:
output = urlopen(request).read()
except URLError:
output = ''
except HTTPError:
output = ''
except socket.error:
output = ''
output = sub('<poster>', '<poster>http://www.thetvdb.com/banners/_cache/', output)
output = sub('<poster>http://www.thetvdb.com/banners/_cache/</poster>', '<poster>http://www.thetvdb.com/wiki/skins/common/images/wiki.png</poster>', output)
output = sub('<Rating></Rating>', '<Rating>0.0</Rating>', output)
output = sub('&', '&', output)
Rating = re.findall('<Rating>(.*?)</Rating>', output)
Year = re.findall('<FirstAired>([0-9]+)-', output)
Added = re.findall('<added>([0-9]+)-', output)
Titles = re.findall('<SeriesName>(.*?)</SeriesName>', output)
Poster = re.findall('<poster>(.*?)</poster>', output)
TVDbid = re.findall('<id>(.*?)</id>', output)
Country = re.findall('<Status>(.*?)</Status>', output)
try:
rating.append(Rating[0])
except IndexError:
rating('0.0')
try:
year.append(Year[0])
except IndexError:
try:
year.append(Added[0])
except IndexError:
year.append(' ')
try:
titles.append(Titles[0])
except IndexError:
titles.append(' ')
try:
poster.append(Poster[0])
except IndexError:
poster.append('http://www.thetvdb.com/wiki/skins/common/images/wiki.png')
try:
id.append(TVDbid[0])
except IndexError:
id.append('0')
try:
country.append(Country[0])
except IndexError:
country.append(' ')
titel = 'TheTVDb Results'
if not titles and runlevel == 1:
text = self.name.replace(' ', '%20')
text = text + 'FIN'
text = sub('[Ss][0-9]+[Ee][0-9]+.*?FIN', '', text)
text = sub('FIN', '', text)
self.transrenew = 'tvdb'
self.translateRenewGoogle(text)
elif not titles and runlevel == 2:
self.session.open(MessageBox, _('\nNo TheTVDb Results for %s.') % self.name, MessageBox.TYPE_INFO)
else:
self.session.openWithCallback(self.makeTVDbUpdate, moviesList, titel, rating, year, titles, poster, id, country)
def makeTMDbUpdate(self, id):
self.renew = True
self.firstdatabase = 'tmdb'
self.dbcount = 1
self.dbcountmax = 1
self.infolist = []
self.plotlist = []
self.backdroplist = []
self.posterlist = []
url = 'http://api.themoviedb.org/3/movie/%s?api_key=dfc629f7ff6936a269f8c5cdb194c890' % id + self.language
self.getTMDbData(url, 1, id, True)
def makeTVDbUpdate(self, id):
self.renew = True
self.firstdatabase = 'tvdb'
self.dbcount = 1
self.dbcountmax = 1
self.infolist = []
self.plotlist = []
self.backdroplist = []
self.posterlist = []
url = 'http://www.thetvdb.com/data/series/' + id + '/' + config.plugins.moviebrowser.language.value + '.xml'
self.getTVDbData(url, 1, id)
def translateRenewGoogle(self, text):
if config.plugins.moviebrowser.language.value == 'de':
url = 'http://translate.google.com/m?hl=en&sl=de&q=%s' % text.title()
elif config.plugins.moviebrowser.language.value == 'es':
url = 'http://translate.google.com/m?hl=en&sl=es&q=%s' % text.title()
elif config.plugins.moviebrowser.language.value == 'ru':
url = 'http://translate.google.com/m?hl=en&sl=ru&q=%s' % text.title()
else:
url = 'http://translate.google.com/m?hl=en&sl=en&q=%s' % text.title()
agents = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'}
before_trans = 'class="t0">'
request = Request(url, headers=agents)
try:
output = urlopen(request).read()
data = output[output.find(before_trans) + len(before_trans):]
movie = data.split('<')[0]
print '%s >> %s' % (text, movie)
except URLError:
movie = text
except HTTPError:
movie = text
except socket.error:
movie = text
if self.transrenew == 'imdb':
movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
url = 'http://imdbapi.org/?title=%s&type=xml&plot=full&episode=0&limit=10&yg=0&mt=none&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0' % movie
self.getIMDbMovies(url, 2)
elif self.transrenew == 'tmdb':
movie = movie.replace(' ', '+').replace(':', '+').replace('-', '+').replace('_', '+')
url = 'http://api.themoviedb.org/3/search/movie?api_key=dfc629f7ff6936a269f8c5cdb194c890&query=' + movie + self.language
self.getTMDbMovies(url, 2)
elif self.transrenew == 'tvdb':
movie = movie.replace(' ', '+').replace(':', '+').replace('_', '+')
url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname=' + movie
self.getTVDbMovies(url, 2)
def deleteMovie(self):
    """Ask for confirmation before physically deleting the selected movie."""
    if self.ready != True:
        return
    try:
        name = self.namelist[self.index]
    except IndexError:
        return
    self.session.openWithCallback(self.delete_return, MessageBox, _('\nDo you really want to delete %s?') % name, MessageBox.TYPE_YESNO)
def delete_return(self, answer):
    # Confirmation callback for deleteMovie(): on "yes" remove the movie file,
    # all its Enigma2 sidecar files, and its database row; on "no" offer to
    # blacklist the entry instead.
    if answer is True:
        try:
            movie = self.movielist[self.index]
            if fileExists(movie):
                os.remove(movie)
            # .ts recordings carry extra sidecar files that must go too
            if search('[.]ts', movie) is not None:
                eitfile = sub('[.]ts', '.eit', movie)
                if fileExists(eitfile):
                    os.remove(eitfile)
                if fileExists(movie + '.ap'):
                    os.remove(movie + '.ap')
                if fileExists(movie + '.cuts'):
                    os.remove(movie + '.cuts')
                if fileExists(movie + '.meta'):
                    os.remove(movie + '.meta')
                if fileExists(movie + '.sc'):
                    os.remove(movie + '.sc')
                if fileExists(movie + '_mp.jpg'):
                    os.remove(movie + '_mp.jpg')
            # the path is later used as a regex pattern in search(); turn
            # literal parentheses into '.' wildcards so they cannot break it
            movie = sub('\\(', '.', movie)
            movie = sub('\\)', '.', movie)
            data = open(self.database).read()
            for line in data.split('\n'):
                if search(movie, line) is not None:
                    # drop the matching database row
                    data = data.replace(line + '\n', '')
            f = open(self.database, 'w')
            f.write(data)
            f.close()
            # if the last entry was removed, reset the wall to the first page
            if self.index == self.maxentry - 1:
                self.index = 0
                self.oldindex = self.wallindex
                self.wallindex = 0
                self.pagecount = 1
            self.makeMovies(self.filter)
        except IndexError:
            pass
    else:
        self.blacklistMovie()
def blacklistMovie(self):
    """Ask for confirmation before hiding the selected movie via the blacklist."""
    if self.ready != True:
        return
    try:
        name = self.namelist[self.index]
    except IndexError:
        return
    self.session.openWithCallback(self.blacklist_return, MessageBox, _('\nDo you really want to blacklist %s?') % name, MessageBox.TYPE_YESNO)
def blacklist_return(self, answer):
    # Confirmation callback for blacklistMovie(): on "yes" move the movie's
    # database row into the blacklist file and remove it from the database.
    if answer is True:
        self.ready = False
        try:
            movie = self.movielist[self.index]
            # the path is later used as a regex pattern in search(); turn
            # literal parentheses into '.' wildcards so they cannot break it
            movie = sub('\\(', '.', movie)
            movie = sub('\\)', '.', movie)
            # open in append mode (creates the blacklist file if missing)
            fremove = open(self.blacklist, 'a')
            data = open(self.database).read()
            for line in data.split('\n'):
                if search(movie, line) is not None:
                    fremove.write(line + '\n')
                    # BUGFIX: this removal must happen inside the loop for the
                    # matched line (as in delete_return). Previously it ran
                    # once after the loop on the final split element (''), so
                    # ('' + '\n') stripped EVERY newline from the database and
                    # the blacklisted row was never removed.
                    data = data.replace(line + '\n', '')
            fremove.close()
            f = open(self.database, 'w')
            f.write(data)
            f.close()
            # if the last entry was removed, reset the wall to the first page
            if self.index == self.maxentry - 1:
                self.index = 0
                self.oldindex = self.wallindex
                self.wallindex = 0
                self.pagecount = 1
            self.makeMovies(self.filter)
        except IndexError:
            pass
def toggleBackdrops(self):
    """Switch movie-specific backdrop display on or off."""
    if self.ready != True:
        return
    if self.backdrops == True:
        self.backdrops = False
        self.hideBackdrops()
    else:
        self.backdrops = True
        try:
            self.showBackdrops(self.index)
        except IndexError:
            pass
def hideBackdrops(self):
    """Replace the movie backdrop with the cached default backdrop image."""
    backdrop = config.plugins.moviebrowser.cachefolder.value + '/default_backdrop.png'
    if fileExists(backdrop):
        # HD skin renders 1280x720, the smaller (xd) skin 1024x576
        if self.xd == False:
            width, height = 1280, 720
        else:
            width, height = 1024, 576
        pic = loadPic(backdrop, width, height, 3, 0, 0, 1)
        if pic != None:
            self['backdrop'].instance.setPixmap(pic)
            self['backdrop'].show()
def _displayBackdropFile(self, backdrop):
    # Scale an already-cached backdrop image to the skin size and show it.
    if self.xd == False:
        Backdrop = loadPic(backdrop, 1280, 720, 3, 0, 0, 1)
    else:
        Backdrop = loadPic(backdrop, 1024, 576, 3, 0, 0, 1)
    if Backdrop != None:
        self['backdrop'].instance.setPixmap(Backdrop)
        self['backdrop'].show()

def showBackdrops(self, index):
    # Show the backdrop of entry `index`: prefer a cached .m1v clip (when the
    # m1v option is on), else the cached image, else download the image first.
    # The cached-image display path was previously duplicated verbatim in two
    # branches; it is now factored into _displayBackdropFile().
    try:
        backdropurl = self.backdroplist[index]
        # map the remote URL onto the local cache file name
        backdrop = sub('http://cf2.imgobject.com/t/p/w1280', '', backdropurl)
        backdrop = sub('http://www.thetvdb.com/banners/fanart/original', '', backdrop)
        backdrop = config.plugins.moviebrowser.cachefolder.value + backdrop
        if config.plugins.moviebrowser.m1v.value == 'yes':
            backdrop_m1v = backdrop.replace('.jpg', '.m1v')
            if fileExists(backdrop_m1v):
                self['backdrop'].hide()
                os.system("/usr/bin/showiframe '%s'" % backdrop_m1v)
                return
        if fileExists(backdrop):
            self._displayBackdropFile(backdrop)
        else:
            getPage(backdropurl).addCallback(self.getBackdrop, backdrop, index).addErrback(self.downloadError)
    except IndexError:
        self['backdrop'].hide()
def getBackdrop(self, output, backdrop, index):
    """getPage callback: write the downloaded backdrop to the cache and show it."""
    cache = open(backdrop, 'wb')
    cache.write(output)
    cache.close()
    if self.xd == False:
        pic = loadPic(backdrop, 1280, 720, 3, 0, 0, 1)
    else:
        pic = loadPic(backdrop, 1024, 576, 3, 0, 0, 1)
    if pic != None:
        self['backdrop'].instance.setPixmap(pic)
        self['backdrop'].show()
def makePoster(self, page):
    # Fill all wall slots with the posters of wall page `page`; cached posters
    # are shown immediately, missing ones are downloaded asynchronously.
    for x in range(self.posterALL):
        try:
            index = x + page * self.posterALL
            posterurl = self.posterlist[index]
            # strip the embedded episode-poster marker before building the URL
            posterurl = sub('<episode>.*?<episode>', '', posterurl)
            # map the remote URL onto the local cache file name
            poster = sub('http://cf2.imgobject.com/t/p/w154', '', posterurl)
            poster = sub('http://www.thetvdb.com/banners/_cache/posters', '', poster)
            poster = config.plugins.moviebrowser.cachefolder.value + poster
            if fileExists(poster):
                if self.xd == False:
                    Poster = loadPic(poster, 133, 200, 3, 0, 0, 1)
                else:
                    Poster = loadPic(poster, 106, 160, 3, 0, 0, 1)
                if Poster != None:
                    self['poster' + str(x)].instance.setPixmap(Poster)
                    self['poster' + str(x)].show()
            else:
                getPage(posterurl).addCallback(self.getPoster, x, poster).addErrback(self.downloadError)
        except IndexError:
            # past the end of the list: blank the remaining slots
            self['poster' + str(x)].hide()
            self['poster_back' + str(self.wallindex)].hide()
def getPoster(self, output, x, poster):
    """getPage callback: write the downloaded poster to the cache and show it in slot x."""
    cache = open(poster, 'wb')
    cache.write(output)
    cache.close()
    if self.xd == False:
        pic = loadPic(poster, 133, 200, 3, 0, 0, 1)
    else:
        pic = loadPic(poster, 106, 160, 3, 0, 0, 1)
    if pic != None:
        slot = self['poster' + str(x)]
        slot.instance.setPixmap(pic)
        slot.show()
def paintFrame(self):
    # Move the selection frame to the current wall slot and paint the
    # (slightly enlarged) poster of the selected entry into it.
    try:
        pos = self.positionlist[self.wallindex]
        self['frame'].instance.move(ePoint(pos[0], pos[1]))
        # restore the previous slot's backing, blank the current one
        self['poster_back' + str(self.oldindex)].show()
        self['poster_back' + str(self.wallindex)].hide()
        posterurl = self.posterlist[self.index]
        # map the remote URL onto the local cache file name
        poster = sub('http://cf2.imgobject.com/t/p/w154', '', posterurl)
        poster = sub('http://www.thetvdb.com/banners/_cache/posters', '', poster)
        poster = sub('<episode>.*?<episode>', '', poster)
        poster = config.plugins.moviebrowser.cachefolder.value + poster
        if fileExists(poster):
            if self.xd == False:
                Poster = loadPic(poster, 153, 220, 3, 0, 0, 1)
            else:
                Poster = loadPic(poster, 126, 180, 3, 0, 0, 1)
            if Poster != None:
                self['frame'].instance.setPixmap(Poster)
    except IndexError:
        pass
def makeEPoster(self):
    # Show the episode poster (marked with '<episode>...<episode>' inside the
    # poster list entry) on the full-plot overlay, downloading it if needed.
    try:
        posterurl = self.posterlist[self.index]
        if search('<episode>', posterurl) is not None:
            eposterurl = search('<episode>(.*?)<episode>', posterurl)
            eposterurl = eposterurl.group(1)
            # cache file name = last path component of the URL
            eposter = sub('.*?[/]', '', eposterurl)
            eposter = config.plugins.moviebrowser.cachefolder.value + '/' + eposter
            if fileExists(eposter):
                if self.xd == False:
                    ePoster = loadPic(eposter, 500, 375, 3, 0, 0, 0)
                else:
                    ePoster = loadPic(eposter, 440, 330, 3, 0, 0, 0)
                if ePoster != None:
                    # the episode image replaces the name/genres text
                    self['2name'].hide()
                    self['2genres'].hide()
                    self['eposter'].instance.setPixmap(ePoster)
                    self['eposter'].show()
            else:
                getPage(eposterurl).addCallback(self.getEPoster, eposter).addErrback(self.downloadError)
        else:
            self['eposter'].hide()
    except IndexError:
        pass
def getEPoster(self, output, eposter):
    """getPage callback: cache the downloaded episode poster and display it."""
    cache = open(eposter, 'wb')
    cache.write(output)
    cache.close()
    if self.xd == False:
        pic = loadPic(eposter, 500, 375, 3, 0, 0, 0)
    else:
        pic = loadPic(eposter, 440, 330, 3, 0, 0, 0)
    if pic != None:
        # the episode image replaces the name/genres text
        self['2name'].hide()
        self['2genres'].hide()
        self['eposter'].instance.setPixmap(pic)
        self['eposter'].show()
def makeName(self, count):
    # Show the title of entry `count`, truncated word-wise to the line width
    # of the active skin (64 chars on the xd skin, 137 otherwise).
    try:
        name = self.namelist[count]
        if self.xd == True:
            if len(name) > 64:
                if name[63:64] == ' ':
                    # cut falls exactly on a space: plain truncate
                    name = name[0:63]
                else:
                    # 'FIN' marks the cut; drop the trailing partial word
                    name = name[0:64] + 'FIN'
                    name = sub(' \\S+FIN', '', name)
                    name = name + '...'
        elif len(name) > 137:
            if name[136:137] == ' ':
                name = name[0:136]
            else:
                name = name[0:137] + 'FIN'
                name = sub(' \\S+FIN', '', name)
                name = name + '...'
        self['name'].setText(name)
        self['name'].show()
    except IndexError:
        self['name'].hide()
def makeInfo(self, count):
    # Update the small info line (runtime, star rating, year, country) for
    # entry `count`; each field hides itself when the data row is too short.
    try:
        runtime = '(' + self.infolist[count][0] + ')'
        self['runtime'].setText(runtime)
        self['runtime'].show()
    except IndexError:
        self['runtime'].hide()
    try:
        ratings = self.infolist[count][1]
        try:
            # star widget expects 0..100; ratings is a '0.0'..'10.0' string
            rating = int(10 * round(float(ratings), 1))
        except ValueError:
            ratings = '0.0'
            rating = int(10 * round(float(ratings), 1))
        self['ratings'].setValue(rating)
        self['ratings'].show()
        self['ratingsback'].show()
        self['ratingtext'].setText(ratings)
    except IndexError:
        self['ratings'].hide()
    try:
        year = self.infolist[count][5]
        self['year'].setText(year)
        self['year'].show()
    except IndexError:
        self['year'].hide()
    try:
        country = self.infolist[count][6]
        self['country'].setText(country)
        self['country'].show()
    except IndexError:
        self['country'].hide()
def toggleInfoFull(self):
    # Cycle the detail overlays. With showplotfull disabled the order is:
    # nothing -> info -> info+plot -> nothing. With it enabled, info and plot
    # toggle together as one overlay.
    if self.ready == True:
        if self.showplotfull == False:
            if self.infofull == False and self.plotfull == False:
                self.infofull = True
                try:
                    self.showInfoFull(self.index)
                except IndexError:
                    pass
            elif self.infofull == True and self.plotfull == False:
                self.infofull = True
                self.plotfull = True
                try:
                    self.showPlotFull(self.index)
                except IndexError:
                    pass
            elif self.infofull == True and self.plotfull == True:
                self.infofull = False
                self.plotfull = False
                self.hideInfoFull()
                self.hidePlotFull()
        elif self.plotfull == False:
            self.infofull = True
            self.plotfull = True
            try:
                self.showInfoFull(self.index)
                self.showPlotFull(self.index)
            except IndexError:
                pass
        elif self.plotfull == True:
            self.infofull = False
            self.plotfull = False
            self.hideInfoFull()
            self.hidePlotFull()
def showInfoFull(self, count):
    """Render the full info overlay (name, runtime, rating, director, actors,
    genres, year, country) for entry `count`. Behavior-identical restyle: the
    six structurally identical text fields are driven by one table instead of
    six copy-pasted try/except blocks."""
    if self.xd == False:
        background = loadPic(self.infoBackPNG, 525, 430, 3, 0, 0, 1)
    else:
        background = loadPic(self.infoBackPNG, 460, 400, 3, 0, 0, 1)
    if background != None:
        self['2infoback'].instance.setPixmap(background)
        self['2infoback'].show()
    # --- title, truncated word-wise to the skin's line width ---
    try:
        name = self.namelist[count]
        if self.xd == True:
            if len(name) > 66:
                if name[65:66] == ' ':
                    name = name[0:65]
                else:
                    name = name[0:66] + 'FIN'
                    name = sub(' \\S+FIN', '', name)
                    name = name + '...'
        elif len(name) > 63:
            if name[62:63] == ' ':
                name = name[0:62]
            else:
                name = name[0:63] + 'FIN'
                name = sub(' \\S+FIN', '', name)
                name = name + '...'
        self['2name'].setText(name)
        self['2name'].show()
    except IndexError:
        self['2name'].hide()
    # --- star rating (value widget + text, special-cased) ---
    try:
        ratings = self.infolist[count][1]
        try:
            rating = int(10 * round(float(ratings), 1))
        except ValueError:
            ratings = '0.0'
            rating = int(10 * round(float(ratings), 1))
        self['2ratings'].setValue(rating)
        self['2ratings'].show()
        self['2ratingsback'].show()
        self['2ratingtext'].setText(ratings)
        self['2ratingtext'].show()
        self['2Rating'].setText('Rating:')
        self['2Rating'].show()
    except IndexError:
        self['2ratings'].hide()
        self['2ratingsback'].hide()
        self['2ratingtext'].hide()
        self['2Rating'].hide()
    # --- plain text fields: (infolist column, value widget, label widget, label text) ---
    fields = ((0, '2runtime', '2Runtime', 'Runtime:'),
              (2, '2director', '2Director', 'Director:'),
              (3, '2actors', '2Actors', 'Actors:'),
              (4, '2genres', '2Genres', 'Genres:'),
              (5, '2year', '2Year', 'Year:'),
              (6, '2country', '2Country', 'Country:'))
    for column, value_widget, label_widget, label in fields:
        try:
            text = self.infolist[count][column]
            self[value_widget].setText(text)
            self[value_widget].show()
            self[label_widget].setText(label)
            self[label_widget].show()
        except IndexError:
            self[value_widget].hide()
            self[label_widget].hide()
def hideInfoFull(self):
    """Hide every widget of the full info overlay."""
    for widget in ('2name', '2runtime', '2Runtime', '2ratings', '2ratingsback',
                   '2ratingtext', '2Rating', '2director', '2Director',
                   '2actors', '2Actors', '2genres', '2Genres', '2year',
                   '2Year', '2country', '2Country', '2infoback'):
        self[widget].hide()
def showPlotFull(self, index):
    """Show the full-plot overlay for the current selection (plus the episode
    poster when one is available)."""
    if self.xd == False:
        background = loadPic(self.infoBackPNG, 525, 430, 3, 0, 0, 1)
    else:
        background = loadPic(self.infoBackPNG, 460, 400, 3, 0, 0, 1)
    if background != None:
        self['plotfullback'].instance.setPixmap(background)
        self['plotfullback'].show()
    try:
        # note: reads self.index, not the `index` argument (historical)
        self['plotfull'].setText(self.plotlist[self.index])
        self['plotfull'].show()
        self.makeEPoster()
    except IndexError:
        self['plotfull'].hide()
        self['eposter'].hide()
def hidePlotFull(self):
    """Hide every widget of the full-plot overlay."""
    for widget in ('eposter', 'plotfull', 'plotfullback'):
        self[widget].hide()
def rightDown(self):
    # Move the selection one slot to the right, paging the poster wall and
    # wrapping from the last entry back to the first.
    if self.ready == True:
        self.oldindex = self.wallindex
        self.wallindex += 1
        if self.pagecount == self.pagemax and self.wallindex > self.posterREST - 1:
            # past the last slot of the (partial) last page: wrap to page 1
            self.wallindex = 0
            self.pagecount = 1
            self.makePoster(self.pagecount - 1)
        elif self.wallindex == self.posterALL:
            # past the last slot of a full page: advance one page
            self.wallindex = 0
            self.pagecount += 1
            self.makePoster(self.pagecount - 1)
        self.index += 1
        if self.index == self.maxentry:
            self.index = 0
        self.paintFrame()
        try:
            # refresh all open overlays for the new selection
            if self.backdrops == True:
                self.showBackdrops(self.index)
            if self.infofull == True:
                self.showInfoFull(self.index)
            if self.plotfull == True:
                self.showPlotFull(self.index)
            self.makeName(self.index)
            self.makeInfo(self.index)
        except IndexError:
            pass
def down(self):
    # Move the selection one row down (posterX slots), handling the step onto
    # the partial last page and the wrap back to the first page.
    if self.ready == True:
        self.oldindex = self.wallindex
        self.wallindex += self.posterX
        if self.pagecount == self.pagemax - 1 and self.wallindex > self.posterALL + self.posterREST - 2:
            # stepping from the second-to-last page beyond the entries of the
            # last (partial) page: land on the very last entry
            self.wallindex = self.posterREST - 1
            self.pagecount += 1
            self.makePoster(self.pagecount - 1)
            self.index = self.maxentry - 1
        elif self.pagecount == self.pagemax and self.wallindex > self.posterREST - 1:
            # stepping off the bottom of the last page: wrap to page 1,
            # keeping the column where possible
            if self.wallindex >= self.posterX:
                self.wallindex = self.wallindex % self.posterX
            self.pagecount = 1
            self.makePoster(self.pagecount - 1)
            if self.wallindex >= self.maxentry % self.posterX:
                self.index = self.index + (self.posterX + self.maxentry % self.posterX)
                if self.index >= self.maxentry:
                    self.index = self.index - self.maxentry
            else:
                self.index = self.index + self.maxentry % self.posterX
                if self.index >= self.maxentry:
                    self.index = self.index - self.maxentry
        elif self.wallindex > self.posterALL - 1:
            # stepping off the bottom of a full page: advance one page
            self.wallindex = self.wallindex - self.posterALL
            self.pagecount += 1
            self.makePoster(self.pagecount - 1)
            self.index = self.index + self.posterX
            if self.index >= self.maxentry:
                self.index = self.index - self.maxentry
        else:
            self.index = self.index + self.posterX
            if self.index >= self.maxentry:
                self.index = self.index - self.maxentry
        self.paintFrame()
        try:
            # refresh all open overlays for the new selection
            if self.backdrops == True:
                self.showBackdrops(self.index)
            if self.infofull == True:
                self.showInfoFull(self.index)
            if self.plotfull == True:
                self.showPlotFull(self.index)
            self.makeName(self.index)
            self.makeInfo(self.index)
        except IndexError:
            pass
def leftUp(self):
    # Move the selection one slot to the left, paging the poster wall and
    # wrapping from the first entry to the last.
    if self.ready == True:
        self.oldindex = self.wallindex
        self.wallindex -= 1
        if self.wallindex < 0:
            if self.pagecount == 1:
                # wrap from page 1 to the last slot of the (partial) last page
                self.wallindex = self.posterREST - 1
                self.pagecount = self.pagemax
            else:
                self.wallindex = self.posterALL - 1
                self.pagecount -= 1
            if self.wallindex < 0:
                self.wallindex = 0
            self.makePoster(self.pagecount - 1)
        self.index -= 1
        if self.index < 0:
            self.index = self.maxentry - 1
        self.paintFrame()
        try:
            # refresh all open overlays for the new selection
            if self.backdrops == True:
                self.showBackdrops(self.index)
            if self.infofull == True:
                self.showInfoFull(self.index)
            if self.plotfull == True:
                self.showPlotFull(self.index)
            self.makeName(self.index)
            self.makeInfo(self.index)
        except IndexError:
            pass
def up(self):
    # Move the selection one row up (posterX slots), wrapping from the first
    # page onto the partial last page while keeping the column where possible.
    if self.ready == True:
        self.oldindex = self.wallindex
        self.wallindex -= self.posterX
        if self.wallindex < 0:
            if self.pagecount == 1:
                if self.oldindex < self.posterREST % self.posterX:
                    # the column exists on the last partial row of the last page
                    self.wallindex = self.posterREST // self.posterX * self.posterX + self.oldindex
                    if self.wallindex < 0:
                        self.wallindex = 0
                    self.index = self.index - self.posterREST % self.posterX
                    if self.index < 0:
                        self.index = self.maxentry + self.index
                else:
                    # otherwise land on the very last entry
                    self.wallindex = self.posterREST - 1
                    self.index = self.maxentry - 1
                self.pagecount = self.pagemax
                self.makePoster(self.pagecount - 1)
            else:
                # step back one full page
                self.wallindex = self.posterALL + self.wallindex
                self.pagecount -= 1
                if self.wallindex < 0:
                    self.wallindex = 0
                self.makePoster(self.pagecount - 1)
                self.index = self.index - self.posterX
                if self.index < 0:
                    self.index = self.maxentry + self.index
        else:
            self.index = self.index - self.posterX
            if self.index < 0:
                self.index = self.maxentry + self.index
        self.paintFrame()
        try:
            # refresh all open overlays for the new selection
            if self.backdrops == True:
                self.showBackdrops(self.index)
            if self.infofull == True:
                self.showInfoFull(self.index)
            if self.plotfull == True:
                self.showPlotFull(self.index)
            self.makeName(self.index)
            self.makeInfo(self.index)
        except IndexError:
            pass
def gotoEnd(self):
    # Jump straight to the last entry on the last wall page.
    if self.ready == True:
        self.oldindex = self.wallindex
        self.wallindex = self.posterREST - 1
        self.pagecount = self.pagemax
        self.makePoster(self.pagecount - 1)
        self.index = self.maxentry - 1
        self.paintFrame()
        try:
            # refresh all open overlays for the new selection
            if self.backdrops == True:
                self.showBackdrops(self.index)
            if self.infofull == True:
                self.showInfoFull(self.index)
            if self.plotfull == True:
                self.showPlotFull(self.index)
            self.makeName(self.index)
            self.makeInfo(self.index)
        except IndexError:
            pass
def showMovies(self):
    """Collect the titles matching the current content/filter from the
    database and open the jump-to-movie list."""
    if self.ready == True:
        movies = ''
        if fileExists(self.database):
            f = open(self.database, 'r')
            for line in f:
                if self.content in line and self.filter in line:
                    movieline = line.split(':::')
                    try:
                        movie = movieline[0]
                    except IndexError:
                        movie = ' '
                    if movie != ' ':
                        movies = movies + movie + ':::'
            f.close()  # fix: the handle was previously never closed
        # drop the empty element produced by the trailing ':::'
        self.movies = movies.split(':::')[:-1]
        self.session.openWithCallback(self.gotoMovie, allMovieList, self.movies, self.index, self.content)
def gotoMovie(self, index):
    # Jump-list callback: select entry `index` and bring its wall page up.
    self.index = index
    self.oldindex = self.wallindex
    # derive wall slot and page from the absolute entry index
    self.wallindex = self.index % self.posterALL
    self.pagecount = self.index // self.posterALL + 1
    self.makePoster(self.pagecount - 1)
    self.paintFrame()
    try:
        # refresh all open overlays for the new selection
        if self.backdrops == True:
            self.showBackdrops(self.index)
        if self.infofull == True:
            self.showInfoFull(self.index)
        if self.plotfull == True:
            self.showPlotFull(self.index)
        self.makeName(self.index)
        self.makeInfo(self.index)
    except IndexError:
        pass
def filterGenre(self):
    """Collect all genres present in the database (column 7) and open the
    genre filter list; makeMovies() is called with the chosen filter."""
    if self.ready == True:
        genres = ''
        if fileExists(self.database):
            f = open(self.database, 'r')
            for line in f:
                if self.content in line:
                    movieline = line.split(':::')
                    try:
                        genre = movieline[7]
                    except IndexError:
                        genre = ' '
                    if genre != ' ':
                        genres = genres + genre + ', '
            f.close()  # fix: the handle was previously never closed
        self.genres = [ i for i in genres.split(', ') ]
        self.genres.sort()
        # drop the empty entry produced by the trailing ', '
        self.genres.pop(0)
        try:
            # collapse adjacent duplicates of the sorted list
            last = self.genres[-1]
            for i in range(len(self.genres) - 2, -1, -1):
                if last == self.genres[i]:
                    del self.genres[i]
                else:
                    last = self.genres[i]
        except IndexError:
            pass
        self.index = 0
        self.wallindex = 0
        self.pagecount = 1
        self.oldindex = 0
        self.pagemax = 1
        self.session.openWithCallback(self.makeMovies, filterList, self.genres, 'Genre Filter')
def filterActor(self):
    """Collect all actors present in the database (column 6) and open the
    actor filter list; makeMovies() is called with the chosen filter."""
    if self.ready == True:
        actors = ''
        if fileExists(self.database):
            f = open(self.database, 'r')
            for line in f:
                if self.content in line:
                    movieline = line.split(':::')
                    try:
                        actor = movieline[6]
                    except IndexError:
                        actor = ' '
                    if actor != ' ':
                        actors = actors + actor + ', '
            f.close()  # fix: the handle was previously never closed
        self.actors = [ i for i in actors.split(', ') ]
        self.actors.sort()
        # drop the empty entry produced by the trailing ', '
        self.actors.pop(0)
        try:
            # collapse adjacent duplicates of the sorted list
            last = self.actors[-1]
            for i in range(len(self.actors) - 2, -1, -1):
                if last == self.actors[i]:
                    del self.actors[i]
                else:
                    last = self.actors[i]
        except IndexError:
            pass
        self.index = 0
        self.wallindex = 0
        self.pagecount = 1
        self.oldindex = 0
        self.pagemax = 1
        self.session.openWithCallback(self.makeMovies, filterList, self.actors, 'Actor Filter')
def filterDirector(self):
    """Collect all directors present in the database (column 5) and open the
    director filter list; makeMovies() is called with the chosen filter."""
    if self.ready == True:
        directors = ''
        if fileExists(self.database):
            f = open(self.database, 'r')
            for line in f:
                if self.content in line:
                    movieline = line.split(':::')
                    try:
                        director = movieline[5]
                    except IndexError:
                        director = ' '
                    if director != ' ':
                        directors = directors + director + ', '
            f.close()  # fix: the handle was previously never closed
        self.directors = [ i for i in directors.split(', ') ]
        self.directors.sort()
        # drop the empty entry produced by the trailing ', '
        self.directors.pop(0)
        try:
            # collapse adjacent duplicates of the sorted list
            last = self.directors[-1]
            for i in range(len(self.directors) - 2, -1, -1):
                if last == self.directors[i]:
                    del self.directors[i]
                else:
                    last = self.directors[i]
        except IndexError:
            pass
        self.index = 0
        self.wallindex = 0
        self.pagecount = 1
        self.oldindex = 0
        self.pagemax = 1
        self.session.openWithCallback(self.makeMovies, filterList, self.directors, 'Director Filter')
def filterSeasons(self):
    """Derive 'Season N' labels from series titles like '(S01E02)' and open
    the season filter list; makeMovies() is called with the chosen filter."""
    if self.ready == True:
        self.content = ':::Series:::'
        seasons = ''
        if fileExists(self.database):
            f = open(self.database, 'r')
            for line in f:
                if self.content in line:
                    movieline = line.split(':::')
                    try:
                        season = movieline[0]
                        # 'FIN' sentinel bounds the non-greedy episode strip
                        season = season + 'FIN'
                        season = sub('[(]S', 'Season ', season)
                        season = sub('[(]s', 'season ', season)
                        season = sub('[Ee][0-9]+[)].*?FIN', '', season)
                        season = sub('FIN', '', season)
                    except IndexError:
                        season = ' '
                    if season != ' ':
                        seasons = seasons + season + ', '
            f.close()  # fix: the handle was previously never closed
        self.seasons = [ i for i in seasons.split(', ') ]
        self.seasons.sort()
        # drop the empty entry produced by the trailing ', '
        self.seasons.pop(0)
        try:
            # collapse adjacent duplicates of the sorted list
            last = self.seasons[-1]
            for i in range(len(self.seasons) - 2, -1, -1):
                if last == self.seasons[i]:
                    del self.seasons[i]
                else:
                    last = self.seasons[i]
        except IndexError:
            pass
        self.index = 0
        self.wallindex = 0
        if self.xd == True:
            self.posterindex = 5
        else:
            self.posterindex = 6
        self.session.openWithCallback(self.makeMovies, filterSeasonList, self.seasons)
def sortDatabase(self):
    """Rewrite the flat-file database sorted by the configured order.
    Rows are ':::'-separated; the sort key is one of the columns (name sorts
    ignore leading German/English articles). Unknown orders leave the file
    order untouched. The sorted copy atomically replaces the database."""
    self.sortorder = config.plugins.moviebrowser.sortorder.value
    f = open(self.database, 'r')
    lines = f.readlines()
    f.close()

    def title_key(line):
        # title column, articles stripped, case-insensitive
        return line.split(':::')[0].replace('Der ', '').replace('Die ', '').replace('Das ', '').replace('The ', '').lower()

    column_for = {'rating': 4,
     'rating_reverse': 4,
     'year': 8,
     'year_reverse': 8,
     'date': 2,
     'date_reverse': 2,
     'folder': 1,
     'folder_reverse': 1}
    descending = self.sortorder.endswith('_reverse')
    if self.sortorder in ('name', 'name_reverse'):
        lines.sort(key=title_key, reverse=descending)
    elif self.sortorder in column_for:
        column = column_for[self.sortorder]
        lines.sort(key=lambda line: line.split(':::')[column], reverse=descending)
    fsorted = open(self.database + '.sorted', 'w')
    fsorted.writelines(lines)
    fsorted.close()
    os.rename(self.database + '.sorted', self.database)
def switchView(self):
    """Switch to the backdrop-style browser view, keeping the selection."""
    if self.ready != True:
        return
    self.session.openWithCallback(self.exit, movieBrowserBackdrop, self.index, self.content, self.filter)
def toogleContent(self):
    """Toggle between Movie and Series content and rebuild the wall.
    (Method name keeps its historical typo for caller compatibility.)"""
    if self.ready != True:
        return
    if self.content == ':::Movie:::' or self.content == ':::':
        selected = ':::Series:::'
    else:
        selected = ':::Movie:::'
    self.content = selected
    self.filter = selected
    # reset the wall to the first page before rebuilding
    self.index = 0
    self.wallindex = 0
    self.pagecount = 1
    self.oldindex = 0
    self.pagemax = 1
    self.makeMovies(self.filter)
def editDatabase(self):
    """Open the database editor; the wall is rebuilt when it returns."""
    if self.ready != True:
        return
    self.session.openWithCallback(self.makeMovies, movieDatabase)
def wikipedia(self):
    # Open the external Wikipedia plugin for the selected title/director/actors,
    # or tell the user where to get it.
    if self.ready == True:
        # NOTE(review): only the compiled .pyo is probed; a source-only or
        # .pyc install of the plugin would not be detected - verify
        if fileExists('/usr/lib/enigma2/python/Plugins/Extensions/Wikipedia/plugin.pyo'):
            self.session.open(searchWikipedia, self.namelist[self.index], self.infolist[self.index][2], self.infolist[self.index][3])
        else:
            self.session.open(MessageBox, _('\nThe Wikipedia plugin could not be found.\n\nPlease download and install the plugin from:\nwww.kashmir-plugins.de'), MessageBox.TYPE_INFO)
            return
def showPath(self):
    """Display the filesystem path of the selected movie in a message box."""
    if self.ready != True:
        return
    self.session.open(MessageBox, _('\nMovie File:\n%s') % self.movielist[self.index], MessageBox.TYPE_INFO)
def getIndex(self, list):
    # Return the current selection index of the given MenuList widget.
    # NOTE(review): the parameter shadows the builtin `list`; kept unchanged
    # for call compatibility.
    return list.getSelectedIndex()
def download(self, link, name):
    # Fetch `link` asynchronously (twisted getPage); `name` is the success
    # callback, download errors are swallowed by downloadError.
    getPage(link).addCallback(name).addErrback(self.downloadError)
def downloadError(self, output):
    # Shared errback for all downloads: artwork fetching is best-effort, so
    # failures are deliberately ignored.
    pass
def config(self):
    """Open the plugin setup screen; the browser exits when it closes."""
    if self.ready != True:
        return
    self.session.openWithCallback(self.exit, movieBrowserConfig)
def zap(self):
    """Open the channel selection dialog on top of the browser."""
    if self.ready != True:
        return
    servicelist = self.session.instantiateDialog(ChannelSelection)
    self.session.execDialog(servicelist)
def hideScreen(self):
    """Fade the OSD out (first call) or back in (second call) by stepping
    /proc/stb/video/alpha through 40 levels of the target transparency."""
    # target alpha depends on whether the m1v background mode is active
    if config.plugins.moviebrowser.m1v.value == 'yes':
        target = config.plugins.moviebrowser.transparency.value
    else:
        target = config.av.osd_alpha.value
    if self.hideflag == True:
        # fade out: 39/40 .. 0/40 of the target value
        self.hideflag = False
        for count in range(39, -1, -1):
            f = open('/proc/stb/video/alpha', 'w')
            f.write('%i' % (target * count / 40))
            f.close()
    else:
        # fade in: 1/40 .. 40/40 of the target value
        self.hideflag = True
        for count in range(1, 41):
            f = open('/proc/stb/video/alpha', 'w')
            f.write('%i' % (target * count / 40))
            f.close()
def exit(self):
    # Leave the browser: restore the OSD alpha and the previously running
    # service where the plugin changed them, then close the screen.
    if config.plugins.moviebrowser.showtv.value == 'hide' or config.plugins.moviebrowser.m1v.value == 'yes':
        f = open('/proc/stb/video/alpha', 'w')
        f.write('%i' % config.av.osd_alpha.value)
        f.close()
        self.session.nav.playService(self.oldService)
    if self.hideflag == False:
        # the screen was faded out via hideScreen(): restore full alpha
        f = open('/proc/stb/video/alpha', 'w')
        f.write('%i' % config.av.osd_alpha.value)
        f.close()
    self.close()
# Database editor screen: lists every movie row of the flat-file database and
# lets the user edit each ':::'-separated field via a virtual keyboard.
class movieDatabase(Screen):
    skin = '\n\t\t\t<screen position="center,center" size="730,523" title=" ">\n\t\t\t\t<ePixmap position="0,0" size="730,28" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/logo.png" zPosition="1"/>\n\t\t\t\t<widget name="list" position="10,38" size="710,475" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t\t<widget name="list2" position="10,38" size="710,475" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t</screen>'
def __init__(self, session):
    # Build the two stacked lists (movie names / editable fields of one movie)
    # and wire the key bindings; the movie list is filled after layout.
    Screen.__init__(self, session)
    self.hideflag = True
    self.ready = False          # guards ok() until makeList() has run
    self.index = 0              # currently selected movie row
    self['list'] = MenuList([])
    self['list2'] = MenuList([])
    self.actlist = 'list'       # which of the two lists has focus
    self['actions'] = ActionMap(['OkCancelActions',
     'DirectionActions',
     'ColorActions',
     'ChannelSelectBaseActions',
     'HelpActions',
     'NumberActions'], {'ok': self.ok,
     'cancel': self.exit,
     'right': self.rightDown,
     'left': self.leftUp,
     'down': self.down,
     'up': self.up,
     'nextBouquet': self.zap,
     'prevBouquet': self.zap,
     #'red': self.infoScreen,
     #'yellow': self.infoScreen,
     #'green': self.infoScreen,
     #'blue': self.hideScreen,
     '0': self.gotoEnd,
     #'displayHelp': self.infoScreen
     }, -1)
    self.database = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/database'
    self.onLayoutFinish.append(self.makeList)
def makeList(self):
    """Parse the flat-file database into per-column lists, fill the name list
    widget and set the window title (movie count + free space).
    The eleven identical try/except column extractions are folded into one
    helper, and the database file handle is now closed (it leaked before)."""

    def field(parts, idx, default=' '):
        # Return column `idx` of a database row, or `default` for short rows.
        try:
            return parts[idx]
        except IndexError:
            return default

    self.namelist = []
    self.datelist = []
    self.runtimelist = []
    self.ratinglist = []
    self.directorlist = []
    self.actorslist = []
    self.genreslist = []
    self.yearlist = []
    self.countrylist = []
    self.posterlist = []
    self.backdroplist = []
    self.list = []
    if fileExists(self.database):
        f = open(self.database, 'r')
        for line in f:
            movieline = line.split(':::')
            name = field(movieline, 0)
            self.namelist.append(name)
            self.datelist.append(field(movieline, 2))
            self.runtimelist.append(field(movieline, 3))
            self.ratinglist.append(field(movieline, 4))
            self.directorlist.append(field(movieline, 5))
            self.actorslist.append(field(movieline, 6))
            self.genreslist.append(field(movieline, 7))
            self.yearlist.append(field(movieline, 8))
            self.countrylist.append(field(movieline, 9))
            self.posterlist.append(field(movieline, 11, 'http://cf2.imgobject.com/t/p/w154' + '/default_poster.png'))
            self.backdroplist.append(field(movieline, 12, 'http://cf2.imgobject.com/t/p/w1280' + '/default_backdrop.png'))
            self.list.append(name)
        f.close()  # fix: the handle was previously never closed
    self['list'].l.setList(self.list)
    self['list'].moveToIndex(self.index)
    self.selectList()
    self.ready = True
    totalMovies = len(self.list)
    if os.path.exists(config.plugins.moviebrowser.moviefolder.value):
        movieFolder = os.statvfs(config.plugins.moviebrowser.moviefolder.value)
        # free space of the movie folder in whole GB
        freeSize = movieFolder[statvfs.F_BSIZE] * movieFolder[statvfs.F_BFREE] / 1024 / 1024 / 1024
        title = 'Database Editor: %s Movies (Movie Folder: %s GB free)' % (str(totalMovies), str(freeSize))
        self.setTitle(title)
    else:
        title = 'Database Editor: %s Movies (Movie Folder: offline)' % str(totalMovies)
        self.setTitle(title)
def makeList2(self):
    """Build the editable field list for the currently selected movie row and
    give it focus."""
    idx = self.index
    rows = (('Movie: ', self.namelist),
     ('Rating: ', self.ratinglist),
     ('Director: ', self.directorlist),
     ('Country: ', self.countrylist),
     ('Actors: ', self.actorslist),
     ('Year: ', self.yearlist),
     ('Runtime: ', self.runtimelist),
     ('Genres: ', self.genreslist),
     ('Poster: ', self.posterlist),
     ('Backdrop: ', self.backdroplist))
    self.list2 = [label + values[idx] for label, values in rows]
    self['list2'].l.setList(self.list2)
    self.selectList2()
def ok(self):
    """OK pressed: drill into the field list, or edit the selected field."""
    if self.ready == True:
        if self.actlist == 'list':
            # A movie was chosen: remember it and show its fields.
            self.index = self['list'].getSelectedIndex()
            self.date = self.datelist[self.index]
            self.makeList2()
        elif self.actlist == 'list2':
            # A field was chosen: pick its backing column and open the keyboard.
            row = self['list2'].getSelectedIndex()
            columns = (self.namelist,
                       self.ratinglist,
                       self.directorlist,
                       self.countrylist,
                       self.actorslist,
                       self.yearlist,
                       self.runtimelist,
                       self.genreslist,
                       self.posterlist,
                       self.backdroplist)
            if 0 <= row < len(columns):
                self.data = columns[row][self.index]
            self.session.openWithCallback(self.changeData, VirtualKeyBoard, title='Database Editor:', text=self.data)
def changeData(self, newdata):
    """Write an edited field value back into the ':::'-separated database file.

    Called back from the virtual keyboard with the new text; a falsy or
    unchanged value is a no-op.  The previous database file is kept as
    '<database>-backup'.
    """
    if newdata and newdata != '' and newdata != self.data:
        # Fields are stored delimited by ':::'; wrap both values so only
        # whole fields are replaced, not substrings of other fields.
        newdata = ':::' + newdata + ':::'
        olddata = ':::' + self.data + ':::'
        database = open(self.database).read()
        for line in database.split('\n'):
            # NOTE(review): self.date is used as a regex pattern without
            # re.escape() — a date containing regex metacharacters would
            # match too loosely or raise; confirm the stored date format.
            if search(self.date, line) is not None:
                newline = line.replace(olddata, newdata)
                # NOTE(review): replaces the matched line everywhere in the
                # whole file text, which assumes lines are unique.
                database = database.replace(line, newline)
        f = open(self.database + '.new', 'w')
        f.write(database)
        f.close()
        # Swap the new file in, keeping the old one as a backup.
        os.rename(self.database, self.database + '-backup')
        os.rename(self.database + '.new', self.database)
        self.makeList()
        self.makeList2()
def selectList(self):
    """Give focus to the movie list; hide the field list."""
    self.actlist = 'list'
    shown, hidden = self['list'], self['list2']
    shown.show()
    hidden.hide()
    shown.selectionEnabled(1)
    hidden.selectionEnabled(0)
def selectList2(self):
    """Give focus to the field list; hide the movie list."""
    self.actlist = 'list2'
    hidden, shown = self['list'], self['list2']
    hidden.hide()
    shown.show()
    hidden.selectionEnabled(0)
    shown.selectionEnabled(1)
def up(self):
    # Move the cursor one entry up in whichever list is active.
    active = self[self.actlist]
    active.up()
def down(self):
    # Move the cursor one entry down in whichever list is active.
    active = self[self.actlist]
    active.down()
def leftUp(self):
    # Page up in whichever list is active.
    active = self[self.actlist]
    active.pageUp()
def rightDown(self):
    # Page down in whichever list is active.
    active = self[self.actlist]
    active.pageDown()
def gotoEnd(self):
    # Jump to the last entry of the movie list.
    self['list'].moveToIndex(len(self.list) - 1)
def zap(self):
    # Open the channel selection so the user can zap without leaving the plugin.
    dialog = self.session.instantiateDialog(ChannelSelection)
    self.session.execDialog(dialog)
def hideScreen(self):
    """Fade the OSD out or back in by ramping the global alpha value.

    Each step rewrites /proc/stb/video/alpha, matching the original's
    one-open-per-write behaviour for the driver interface.
    """
    if self.hideflag == True:
        self.hideflag = False
        # Fade out: alpha scaled by 39/40 down to 0/40.
        for step in range(39, -1, -1):
            with open('/proc/stb/video/alpha', 'w') as f:
                f.write('%i' % (config.av.osd_alpha.value * step / 40))
    else:
        self.hideflag = True
        # Fade in: alpha scaled by 1/40 up to 40/40.
        for step in range(1, 41):
            with open('/proc/stb/video/alpha', 'w') as f:
                f.write('%i' % (config.av.osd_alpha.value * step / 40))
def exit(self):
    """Leave the field list, or close the editor with the 'no change' marker."""
    if self.hideflag == False:
        # Screen was faded out: restore full OSD alpha first.
        with open('/proc/stb/video/alpha', 'w') as f:
            f.write('%i' % config.av.osd_alpha.value)
    if self.actlist == 'list':
        self.close(':::')
    elif self.actlist == 'list2':
        self.selectList()
class moviesList(Screen):
    """Search-result chooser: a multi-content list of candidate movies with
    up to four posters shown beside it.

    All per-movie data arrives as parallel lists indexed by entry
    (titles/year/country/rating/id); ``poster`` holds the poster URLs.
    ``close()`` is called with the TMDb/TheTVDb id of the chosen entry.

    The poster-slot update logic — previously copy-pasted four times per
    navigation handler — is centralised in ``_updatePosters`` /
    ``_updatePostersBatch``; the navigation handlers now also read the
    selected index unconditionally, fixing a latent NameError where ``c``
    could be used after a swallowed exception left it unbound.
    """
    skin = '\n\t\t\t<screen position="center,center" size="730,538" title=" ">\n\t\t\t\t<ePixmap position="0,0" size="730,28" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/logo.png" zPosition="1"/>\n\t\t\t\t<widget name="poster1" position="10,33" size="80,120" alphatest="blend" zPosition="1" />\n\t\t\t\t<widget name="poster2" position="10,158" size="80,120" alphatest="blend" zPosition="1" />\n\t\t\t\t<widget name="poster3" position="10,283" size="80,120" alphatest="blend" zPosition="1" />\n\t\t\t\t<widget name="poster4" position="10,408" size="80,120" alphatest="blend" zPosition="1" />\n\t\t\t\t<widget name="list" position="100,33" size="620,500" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t</screen>'

    def __init__(self, session, titel, rating, year, titles, poster, id, country):
        Screen.__init__(self, session)
        self.titel = titel
        self.rating = rating
        self.year = year
        self.titles = titles
        self.poster = poster
        self.id = id
        self.country = country
        self.movielist = []
        # Temporary files backing the four visible poster slots.
        self.poster1 = '/tmp/moviebrowser1.jpg'
        self.poster2 = '/tmp/moviebrowser2.jpg'
        self.poster3 = '/tmp/moviebrowser3.jpg'
        self.poster4 = '/tmp/moviebrowser4.jpg'
        self['poster1'] = Pixmap()
        self['poster2'] = Pixmap()
        self['poster3'] = Pixmap()
        self['poster4'] = Pixmap()
        self.ready = False
        self.hideflag = True
        self.setTitle(titel)
        self['list'] = ItemList([])
        self['actions'] = ActionMap(['OkCancelActions',
         'DirectionActions',
         'ColorActions',
         'ChannelSelectBaseActions',
         'HelpActions',
         'NumberActions'], {'ok': self.ok,
         'cancel': self.exit,
         'right': self.rightDown,
         'left': self.leftUp,
         'down': self.down,
         'up': self.up,
         'nextBouquet': self.zap,
         'prevBouquet': self.zap,
         '0': self.gotoEnd}, -1)
        self.onLayoutFinish.append(self.onLayoutFinished)

    def _posterSlots(self):
        # Widget name and download callback for each of the four poster slots.
        return (('poster1', self.getPoster1),
         ('poster2', self.getPoster2),
         ('poster3', self.getPoster3),
         ('poster4', self.getPoster4))

    def _updatePosters(self, base, show=True):
        """Schedule downloads of posters base..base+3 into the four slots.

        A slot whose poster index is out of range is hidden.  With
        ``show=True`` a successfully scheduled slot is made visible
        immediately (the pixmap is filled later by the download callback);
        with ``show=False`` visibility is left untouched on success.
        """
        for offset, (widget, callback) in enumerate(self._posterSlots()):
            try:
                link = self.poster[base + offset]
                self.download(link, callback)
                if show:
                    self[widget].show()
            except IndexError:
                self[widget].hide()

    def _updatePostersBatch(self, base):
        """All-or-nothing variant used when paging up: the first missing
        index aborts the remaining downloads (preserves the original
        single-try behaviour); slot visibility is handled by the caller."""
        try:
            self.download(self.poster[base], self.getPoster1)
            self.download(self.poster[base + 1], self.getPoster2)
            self.download(self.poster[base + 2], self.getPoster3)
            self.download(self.poster[base + 3], self.getPoster4)
        except IndexError:
            pass

    def _removePosters(self):
        # Delete the temporary poster files before leaving the screen.
        for path in (self.poster1, self.poster2, self.poster3, self.poster4):
            if fileExists(path):
                os.remove(path)

    def onLayoutFinished(self):
        """Fill the poster slots and build the multi-content result rows."""
        self._updatePosters(0)
        for x in range(len(self.titles)):
            res = ['']
            try:
                res.append(MultiContentEntryText(pos=(5, 13), size=(610, 30), font=24, color=16777215, color_sel=16777215, flags=RT_HALIGN_LEFT, text=self.titles[x]))
            except IndexError:
                pass
            try:
                res.append(MultiContentEntryText(pos=(5, 48), size=(50, 25), font=20, color=16777215, color_sel=16777215, flags=RT_HALIGN_LEFT, text=self.year[x]))
            except IndexError:
                pass
            try:
                res.append(MultiContentEntryText(pos=(55, 48), size=(560, 25), font=20, color=16777215, color_sel=16777215, flags=RT_HALIGN_LEFT, text=self.country[x]))
            except IndexError:
                pass
            try:
                # Scale the 0..10 rating to the width (in px) of the rating bar.
                rating = int(10 * round(float(self.rating[x]), 1)) * 2 + int(10 * round(float(self.rating[x]), 1)) // 10
            except IndexError:
                rating = 0

            png = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings_back.png'
            if fileExists(png):
                res.append(MultiContentEntryPixmapAlphaTest(pos=(5, 84), size=(210, 21), png=loadPNG(png)))
            png2 = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/ratings.png'
            if fileExists(png2):
                res.append(MultiContentEntryPixmapAlphaTest(pos=(5, 84), size=(rating, 21), png=loadPNG(png2)))
            try:
                res.append(MultiContentEntryText(pos=(225, 84), size=(50, 25), font=20, color=16777215, color_sel=16777215, flags=RT_HALIGN_LEFT, text=self.rating[x]))
            except IndexError:
                pass
            self.movielist.append(res)

        self['list'].l.setList(self.movielist)
        self['list'].l.setItemHeight(125)
        self.ready = True

    def ok(self):
        """Confirm the selection: clean up posters and return the movie id."""
        if self.ready == True:
            self._removePosters()
            c = self['list'].getSelectedIndex()
            self.close(self.id[c])

    def down(self):
        """Move down; refresh the poster column when crossing a group of 4
        or wrapping from the last entry back to the first."""
        if self.ready == True:
            c = self['list'].getSelectedIndex()
            self['list'].down()
            if c + 1 == len(self.titles):
                # Wrapped around to the top.
                self._updatePosters(0)
            elif c % 4 == 3:
                # Entered the next group of four.
                self._updatePosters(c + 1)

    def up(self):
        """Move up; refresh the poster column when leaving a group of 4 or
        wrapping from the first entry to the last (possibly partial) group."""
        if self.ready == True:
            c = self['list'].getSelectedIndex()
            self['list'].up()
            if c == 0:
                total = len(self.titles)
                d = total % 4
                if d == 0:
                    d = 4
                self._updatePosters(total - d)
            elif c % 4 == 0:
                self._updatePosters(c - 4)

    def rightDown(self):
        """Page down; load the next poster group without forcing slots visible."""
        if self.ready == True:
            c = self['list'].getSelectedIndex()
            self['list'].pageDown()
            total = len(self.titles)
            d = c % 4
            e = total % 4
            if e == 0:
                e = 4
            if c + e >= total:
                # Already in the last group: nothing to load.
                return
            # First index of the next group of four (c+4-d for every d).
            self._updatePosters(c + 4 - d, show=False)

    def leftUp(self):
        """Page up; load the previous poster group, then show all slots."""
        if self.ready == True:
            try:
                c = self['list'].getSelectedIndex()
                self['list'].pageUp()
                if c >= 4:
                    # First index of the previous group of four (c-d-4).
                    self._updatePostersBatch(c - c % 4 - 4)
                self['poster1'].show()
                self['poster2'].show()
                self['poster3'].show()
                self['poster4'].show()
            except IndexError:
                pass

    def gotoEnd(self):
        # Jump to the last entry, then re-sync the posters via a page cycle.
        if self.ready == True:
            end = len(self.titles) - 1
            if end > 4:
                self['list'].moveToIndex(end)
                self.leftUp()
                self.rightDown()

    def _writePoster(self, path, widget, output):
        # Persist the downloaded image data, then render it into its slot.
        f = open(path, 'wb')
        f.write(output)
        f.close()
        self._showPoster(path, widget)

    def _showPoster(self, path, widget):
        pic = loadPic(path, 80, 120, 3, 0, 0, 1)
        if pic != None:
            self[widget].instance.setPixmap(pic)

    # Per-slot download callbacks (bound individually so each download
    # knows which slot it belongs to).
    def getPoster1(self, output):
        self._writePoster(self.poster1, 'poster1', output)

    def showPoster1(self, poster1):
        self._showPoster(poster1, 'poster1')

    def getPoster2(self, output):
        self._writePoster(self.poster2, 'poster2', output)

    def showPoster2(self, poster2):
        self._showPoster(poster2, 'poster2')

    def getPoster3(self, output):
        self._writePoster(self.poster3, 'poster3', output)

    def showPoster3(self, poster3):
        self._showPoster(poster3, 'poster3')

    def getPoster4(self, output):
        self._writePoster(self.poster4, 'poster4', output)

    def showPoster4(self, poster4):
        self._showPoster(poster4, 'poster4')

    def download(self, link, name):
        # Asynchronous HTTP fetch; errors are deliberately ignored
        # (a missing poster just leaves the slot empty).
        getPage(link).addCallback(name).addErrback(self.downloadError)

    def downloadError(self, output):
        pass

    def zap(self):
        # Let the user change channel without leaving the plugin.
        dialog = self.session.instantiateDialog(ChannelSelection)
        self.session.execDialog(dialog)

    def hideScreen(self):
        """Fade the OSD out or back in by ramping the global alpha value."""
        if self.hideflag == True:
            self.hideflag = False
            for step in range(39, -1, -1):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))
        else:
            self.hideflag = True
            for step in range(1, 41):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))

    def exit(self):
        """Cancel: restore alpha if faded, clean up, and return the current id."""
        if self.hideflag == False:
            with open('/proc/stb/video/alpha', 'w') as f:
                f.write('%i' % config.av.osd_alpha.value)
        self._removePosters()
        c = self['list'].getSelectedIndex()
        self.close(self.id[c])
class filterList(Screen):
    """Single-column chooser used for filter values (genre, actor, ...).

    ``close()`` receives the selected entry, or the marker ':::' when the
    filter should be reset (keys 7/8/9 or cancel).
    """
    skin = '\n\t\t\t<screen position="center,center" size="270,523" title=" ">\n\t\t\t\t<ePixmap position="-230,0" size="500,28" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/logo.png" zPosition="1"/>\n\t\t\t\t<widget name="list" position="10,38" size="250,475" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t</screen>'

    def __init__(self, session, list, titel):
        Screen.__init__(self, session)
        self.list = list
        self.hideflag = True
        self.setTitle(titel)
        self['list'] = MenuList([])
        self['actions'] = ActionMap(['OkCancelActions',
         'DirectionActions',
         'ColorActions',
         'ChannelSelectBaseActions',
         'HelpActions',
         'NumberActions'], {'ok': self.ok,
         'cancel': self.exit,
         'down': self.down,
         'up': self.up,
         'nextBouquet': self.zap,
         'prevBouquet': self.zap,
         '7': self.resetFilter,
         '8': self.resetFilter,
         '9': self.resetFilter,
         '0': self.gotoEnd}, -1)
        self.onLayoutFinish.append(self.onLayoutFinished)

    def onLayoutFinished(self):
        # Populate the list only after the widget has been laid out.
        self['list'].l.setList(self.list)

    def ok(self):
        self.close(self['list'].getCurrent())

    def resetFilter(self):
        # ':::' is the caller's marker for "no filter".
        self.close(':::')

    def down(self):
        self['list'].down()

    def up(self):
        self['list'].up()

    def gotoEnd(self):
        self['list'].moveToIndex(len(self.list) - 1)

    def zap(self):
        dialog = self.session.instantiateDialog(ChannelSelection)
        self.session.execDialog(dialog)

    def hideScreen(self):
        """Fade the OSD out or back in by ramping the global alpha value."""
        if self.hideflag == True:
            self.hideflag = False
            for step in range(39, -1, -1):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))
        else:
            self.hideflag = True
            for step in range(1, 41):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))

    def exit(self):
        if self.hideflag == False:
            with open('/proc/stb/video/alpha', 'w') as f:
                f.write('%i' % config.av.osd_alpha.value)
        self.close(':::')
class filterSeasonList(Screen):
    """Chooser for a series season filter.

    ``close()`` receives the chosen season rewritten as '(Sxx'/'(sxx'
    (the format used in the database), or ':::Series:::' to reset (key 4
    or cancel).
    """
    skin = '\n\t\t\t<screen position="center,center" size="530,523" title=" ">\n\t\t\t\t<ePixmap position="-100,0" size="630,28" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/logo.png" zPosition="1"/>\n\t\t\t\t<widget name="list" position="10,38" size="510,475" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t</screen>'

    def __init__(self, session, list):
        Screen.__init__(self, session)
        self.list = list
        self.hideflag = True
        self['list'] = MenuList([])
        self['actions'] = ActionMap(['OkCancelActions',
         'DirectionActions',
         'ColorActions',
         'ChannelSelectBaseActions',
         'HelpActions',
         'NumberActions'], {'ok': self.ok,
         'cancel': self.exit,
         'down': self.down,
         'up': self.up,
         'nextBouquet': self.zap,
         'prevBouquet': self.zap,
         '4': self.resetFilter,
         '0': self.gotoEnd}, -1)
        self.onLayoutFinish.append(self.onLayoutFinished)

    def onLayoutFinished(self):
        """Fill the list and show season count plus movie-folder free space."""
        self['list'].l.setList(self.list)
        totalSeasons = len(self.list)
        folder = config.plugins.moviebrowser.moviefolder.value
        if os.path.exists(folder):
            stats = os.statvfs(folder)
            freeSize = stats[statvfs.F_BSIZE] * stats[statvfs.F_BFREE] / 1024 / 1024 / 1024
            title = '%s Series Seasons (Movie Folder: %s GB free)' % (str(totalSeasons), str(freeSize))
        else:
            title = '%s Series Seasons (Movie Folder: offline)' % str(totalSeasons)
        self.setTitle(title)

    def ok(self):
        # Rewrite 'Season x'/'season x' into the database's '(Sx'/'(sx' form.
        current = self['list'].getCurrent()
        current = sub('Season ', '(S', current)
        current = sub('season ', '(s', current)
        self.close(current)

    def resetFilter(self):
        self.close(':::Series:::')

    def down(self):
        self['list'].down()

    def up(self):
        self['list'].up()

    def gotoEnd(self):
        self['list'].moveToIndex(len(self.list) - 1)

    def zap(self):
        dialog = self.session.instantiateDialog(ChannelSelection)
        self.session.execDialog(dialog)

    def hideScreen(self):
        """Fade the OSD out or back in by ramping the global alpha value."""
        if self.hideflag == True:
            self.hideflag = False
            for step in range(39, -1, -1):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))
        else:
            self.hideflag = True
            for step in range(1, 41):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))

    def exit(self):
        if self.hideflag == False:
            with open('/proc/stb/video/alpha', 'w') as f:
                f.write('%i' % config.av.osd_alpha.value)
        self.close(':::Series:::')
class allMovieList(Screen):
    """Flat list of all movies/series; ``close()`` returns the selected index.

    The window title reports the entry count and the movie folder's free
    space (or 'offline' when the folder is not mounted).
    """
    skin = '\n\t\t\t<screen position="center,center" size="730,523" title=" ">\n\t\t\t\t<ePixmap position="0,0" size="730,28" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/logo.png" zPosition="1"/>\n\t\t\t\t<widget name="list" position="10,38" size="710,475" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t</screen>'

    def __init__(self, session, list, index, content):
        Screen.__init__(self, session)
        self.list = list
        self.index = index
        self.content = content
        self.hideflag = True
        self['list'] = MenuList([])
        self['actions'] = ActionMap(['OkCancelActions',
         'DirectionActions',
         'ColorActions',
         'ChannelSelectBaseActions',
         'HelpActions',
         'NumberActions'], {'ok': self.ok,
         'cancel': self.exit,
         'down': self.down,
         'up': self.up,
         'nextBouquet': self.zap,
         'prevBouquet': self.zap,
         '0': self.gotoEnd}, -1)
        self.onLayoutFinish.append(self.onLayoutFinished)

    def onLayoutFinished(self):
        """Fill the list, restore the cursor and build the window title."""
        self['list'].l.setList(self.list)
        try:
            self['list'].moveToIndex(self.index)
        except IndexError:
            pass
        totalMovies = len(self.list)
        folder = config.plugins.moviebrowser.moviefolder.value
        if os.path.exists(folder):
            stats = os.statvfs(folder)
            freeSize = stats[statvfs.F_BSIZE] * stats[statvfs.F_BFREE] / 1024 / 1024 / 1024
            if self.content == ':::Movie:::':
                title = '%s Movies (Movie Folder: %s GB free)' % (str(totalMovies), str(freeSize))
            elif self.content == ':::Series:::':
                title = '%s Series (Movie Folder: %s GB free)' % (str(totalMovies), str(freeSize))
            else:
                title = '%s Movies & Series (Movie Folder: %s GB free)' % (str(totalMovies), str(freeSize))
        else:
            if self.content == ':::Movie:::':
                title = '%s Movies (Movie Folder: offline)' % str(totalMovies)
            elif self.content == ':::Series:::':
                title = '%s Series (Movie Folder: offline)' % str(totalMovies)
            else:
                title = '%s Movies & Series (Movie Folder: offline)' % str(totalMovies)
        self.setTitle(title)

    def ok(self):
        self.close(self['list'].getSelectedIndex())

    def down(self):
        self['list'].down()

    def up(self):
        self['list'].up()

    def gotoEnd(self):
        self['list'].moveToIndex(len(self.list) - 1)

    def zap(self):
        dialog = self.session.instantiateDialog(ChannelSelection)
        self.session.execDialog(dialog)

    def hideScreen(self):
        """Fade the OSD out or back in by ramping the global alpha value."""
        if self.hideflag == True:
            self.hideflag = False
            for step in range(39, -1, -1):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))
        else:
            self.hideflag = True
            for step in range(1, 41):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))

    def exit(self):
        if self.hideflag == False:
            with open('/proc/stb/video/alpha', 'w') as f:
                f.write('%i' % config.av.osd_alpha.value)
        self.close(self['list'].getSelectedIndex())
class searchWikipedia(Screen):
    """Chooser listing the movie, its director and its actors as possible
    Wikipedia search terms.

    Simplifications: the actor list is built with a plain ``split`` and a
    single loop (the original copied the split through a no-op
    comprehension and re-counted it by hand), and the nine-branch elif
    chain in ``ok`` is collapsed into one index expression with the same
    0..8 bound.
    """
    skin = '\n\t\t\t<screen position="center,center" size="550,295" title="Wikipedia - Search for Movie, Director or Actor">\n\t\t\t\t<ePixmap position="0,0" size="550,50" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/Wikipedia/pic/wiki.png" zPosition="1"/>\n\t\t\t\t<widget name="list" position="10,60" size="530,225" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t</screen>'

    def __init__(self, session, movie, director, actors):
        Screen.__init__(self, session)
        self.hideflag = True
        self.movie = movie
        self.director = director
        self.actors = actors          # comma-separated actor names
        self.list = []
        self['list'] = MenuList([])
        self['actions'] = ActionMap(['OkCancelActions',
         'DirectionActions',
         'ColorActions',
         'ChannelSelectBaseActions',
         'HelpActions',
         'NumberActions'], {'ok': self.ok,
         'cancel': self.exit,
         'down': self.down,
         'up': self.up,
         'nextBouquet': self.zap,
         'prevBouquet': self.zap,
         '0': self.gotoEnd}, -1)
        self.onLayoutFinish.append(self.onLayoutFinished)

    def onLayoutFinished(self):
        """Build the search-term list: movie, director, then every actor."""
        self.list.append('Movie: ' + self.movie)
        self.list.append('Director: ' + self.director)
        self.actor = self.actors.split(', ')
        for actor in self.actor:
            self.list.append('Actor: ' + actor)
        self['list'].l.setList(self.list)

    def ok(self):
        """Resolve the selected entry to its search term."""
        index = self['list'].getSelectedIndex()
        if index == 0:
            name = self.movie
        elif index == 1:
            name = self.director
        elif 2 <= index <= 8:
            # Entries 2..8 map to actors 0..6 (same cap as the original chain).
            name = self.actor[index - 2]
        # NOTE(review): 'name' is computed but never used — the original
        # did the same, so selecting an entry currently has no effect.
        # Presumably a Wikipedia lookup/close(name) was intended; confirm
        # before wiring it up.

    def down(self):
        self['list'].down()

    def up(self):
        self['list'].up()

    def gotoEnd(self):
        # Jump to the last entry.
        end = len(self.list) - 1
        self['list'].moveToIndex(end)

    def zap(self):
        # Open the channel selection so the user can zap without leaving.
        servicelist = self.session.instantiateDialog(ChannelSelection)
        self.session.execDialog(servicelist)

    def hideScreen(self):
        """Fade the OSD out or back in by ramping the global alpha value."""
        if self.hideflag == True:
            self.hideflag = False
            for step in range(39, -1, -1):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))
        else:
            self.hideflag = True
            for step in range(1, 41):
                with open('/proc/stb/video/alpha', 'w') as f:
                    f.write('%i' % (config.av.osd_alpha.value * step / 40))

    def exit(self):
        if self.hideflag == False:
            with open('/proc/stb/video/alpha', 'w') as f:
                f.write('%i' % config.av.osd_alpha.value)
        self.close()
class ItemList(MenuList):
    """Multi-content list box preloaded with the three font sizes the
    browser's row templates reference (24/22/20 pt Regular)."""

    def __init__(self, items, enableWrapAround = True):
        MenuList.__init__(self, items, enableWrapAround, eListboxPythonMultiContent)
        for size in (24, 22, 20):
            self.l.setFont(size, gFont('Regular', size))
class movieBrowserConfig(ConfigListScreen, Screen):
skin = '\n\t\t\t<screen position="center,center" size="530,500" backgroundColor="#20000000" title="Movie Browser Setup">\n\t\t\t\t<ePixmap position="-100,0" size="630,28" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/logo.png" alphatest="blend" zPosition="1" />\n\t\t\t\t<ePixmap position="9,37" size="512,1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/seperator.png" alphatest="off" zPosition="1" />\n\t\t\t\t<widget name="config" position="9,38" size="512,125" itemHeight="25" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t\t<ePixmap position="9,164" size="512,1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/seperator.png" alphatest="off" zPosition="1" />\n\t\t\t\t<eLabel position="150,173" size="125,20" font="Regular;18" halign="left" text="Save" transparent="1" zPosition="1" />\n\t\t\t\t<eLabel position="365,173" size="125,20" font="Regular;18" halign="left" text="Cancel" transparent="1" zPosition="1" />\n\t\t\t\t<ePixmap position="125,174" size="18,18" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/green.png" alphatest="blend" zPosition="1" />\n\t\t\t\t<ePixmap position="340,174" size="18,18" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/red.png" alphatest="blend" zPosition="1" />\n\t\t\t\t<widget name="plugin" position="9,203" size="512,288" alphatest="blend" zPosition="1" />\n\t\t\t</screen>'
def __init__(self, session):
Screen.__init__(self, session)
self['plugin'] = Pixmap()
self.sortorder = config.plugins.moviebrowser.sortorder.value
self.cachefolder = config.plugins.moviebrowser.cachefolder.value
self.database = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/database'
self.ready = True
list = []
list.append(getConfigListEntry(_('Plugin Style:'), config.plugins.moviebrowser.style))
self.foldername = getConfigListEntry(_('Movie Folder:'), config.plugins.moviebrowser.moviefolder)
list.append(self.foldername)
#list.append(getConfigListEntry(_('Cache Folder:'), config.plugins.moviebrowser.cachefolder))
list.append(getConfigListEntry(_('Default Database:'), config.plugins.moviebrowser.database))
list.append(getConfigListEntry(_('TMDb/TheTVDb Language:'), config.plugins.moviebrowser.language))
#list.append(getConfigListEntry(_('Plugin Size:'), config.plugins.moviebrowser.plugin_size))
list.append(getConfigListEntry(_('Show Content:'), config.plugins.moviebrowser.filter))
#list.append(getConfigListEntry(_('Show Backdrops:'), config.plugins.moviebrowser.backdrops))
#list.append(getConfigListEntry(_('Show Plot Full:'), config.plugins.moviebrowser.plotfull))
#list.append(getConfigListEntry(_('Plot Full Font Size:'), config.plugins.moviebrowser.plotfont))
#list.append(getConfigListEntry(_('Headline Color:'), config.plugins.moviebrowser.color))
list.append(getConfigListEntry(_('Sort Order:'), config.plugins.moviebrowser.sortorder))
#list.append(getConfigListEntry(_('Support m1v Backdrops:'), config.plugins.moviebrowser.m1v))
#list.append(getConfigListEntry(_('m1v Transparency:'), config.plugins.moviebrowser.transparency))
#list.append(getConfigListEntry(_('Show TV on Plugin Start:'), config.plugins.moviebrowser.showtv))
#list.append(getConfigListEntry(_('Show Plugin in Main Menu:'), config.plugins.moviebrowser.menu))
list.append(getConfigListEntry(_('Reset Database:'), config.plugins.moviebrowser.reset))
ConfigListScreen.__init__(self, list, on_change=self.UpdateComponents)
self['key_red'] = Label(_('Cancel'))
self['key_green'] = Label(_('Save'))
self['actions'] = ActionMap(['SetupActions', 'ColorActions'], {'ok': self.save,
'cancel': self.cancel,
'red': self.cancel,
'green': self.save}, -1)
self.onLayoutFinish.append(self.UpdateComponents)
def UpdateComponents(self):
png = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/' + config.plugins.moviebrowser.style.value + '.png'
if fileExists(png):
PNG = loadPic(png, 512, 288, 3, 0, 0, 1)
if PNG != None:
self['plugin'].instance.setPixmap(PNG)
current = self['config'].getCurrent()
if current == self.foldername:
self.session.openWithCallback(self.folderSelected, FolderSelection, config.plugins.moviebrowser.moviefolder.value)
def folderSelected(self, folder):
if folder is not None:
config.plugins.moviebrowser.moviefolder.value = folder
config.plugins.moviebrowser.moviefolder.save()
def save(self):
if self.ready == True:
self.ready = False
if config.plugins.moviebrowser.sortorder.value != self.sortorder:
if fileExists(self.database):
f = open(self.database, 'r')
lines = f.readlines()
f.close()
if config.plugins.moviebrowser.sortorder.value == 'name':
lines.sort(key=lambda line: line.split(':::')[0].replace('Der ', '').replace('Die ', '').replace('Das ', '').replace('The ', '').lower())
elif config.plugins.moviebrowser.sortorder.value == 'name_reverse':
lines.sort(key=lambda line: line.split(':::')[0].replace('Der ', '').replace('Die ', '').replace('Das ', '').replace('The ', '').lower(), reverse=True)
elif config.plugins.moviebrowser.sortorder.value == 'rating':
lines.sort(key=lambda line: line.split(':::')[4])
elif config.plugins.moviebrowser.sortorder.value == 'rating_reverse':
lines.sort(key=lambda line: line.split(':::')[4], reverse=True)
elif config.plugins.moviebrowser.sortorder.value == 'year':
lines.sort(key=lambda line: line.split(':::')[8])
elif config.plugins.moviebrowser.sortorder.value == 'year_reverse':
lines.sort(key=lambda line: line.split(':::')[8], reverse=True)
elif config.plugins.moviebrowser.sortorder.value == 'date':
lines.sort(key=lambda line: line.split(':::')[2])
elif config.plugins.moviebrowser.sortorder.value == 'date_reverse':
lines.sort(key=lambda line: line.split(':::')[2], reverse=True)
elif config.plugins.moviebrowser.sortorder.value == 'folder':
lines.sort(key=lambda line: line.split(':::')[1])
elif config.plugins.moviebrowser.sortorder.value == 'folder_reverse':
lines.sort(key=lambda line: line.split(':::')[1], reverse=True)
fsorted = open(self.database + '.sorted', 'w')
fsorted.writelines(lines)
fsorted.close()
os.rename(self.database + '.sorted', self.database)
if config.plugins.moviebrowser.reset.value == 'yes':
if fileExists(self.database):
os.rename(self.database, self.database + '-backup')
open('/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/db/reset', 'w').close()
config.plugins.moviebrowser.reset.value = 'no'
config.plugins.moviebrowser.reset.save()
if config.plugins.moviebrowser.cachefolder.value != self.cachefolder:
self.container = eConsoleAppContainer()
self.container.appClosed.append(self.finished)
newcache = sub('/cache', '', config.plugins.moviebrowser.cachefolder.value)
self.container.execute("mkdir -p '%s' && cp -r '%s' '%s' && rm -rf '%s'" % (config.plugins.moviebrowser.cachefolder.value,
self.cachefolder,
newcache,
self.cachefolder))
else:
for x in self['config'].list:
x[1].save()
configfile.save()
self.exit()
def finished(self, retval):
    """Console-job callback: release the finished container, persist all
    pending configuration values and leave the setup screen."""
    self.container.appClosed[:] = []  # drop callbacks held by the container
    del self.container
    for entry in self['config'].list:
        entry[1].save()
    configfile.save()
    self.exit()
def cancel(self):
    """Discard every pending configuration change, then leave the screen."""
    for entry in self['config'].list:
        entry[1].cancel()
    self.exit()
def exit(self):
    """Reopen the movie browser in the configured style and close this screen."""
    style = config.plugins.moviebrowser.style.value
    flt = config.plugins.moviebrowser.filter.value
    if style == 'backdrop':
        self.session.openWithCallback(self.close, movieBrowserBackdrop, 0, flt, flt)
    elif style == 'posterwall':
        self.session.openWithCallback(self.close, movieBrowserPosterwall, 0, flt, flt)
class FolderSelection(Screen):
    """Modal directory picker.

    Shows a FileList rooted at *folder* (directories only, system paths
    hidden) plus a preview picture of the configured browser style.
    Closes with the selected path on green, or with None on cancel/red.
    """
    skin = '\n\t\t\t<screen position="center,center" size="530,500" backgroundColor="#20000000" title="Movie Browser Setup">\n\t\t\t\t<ePixmap position="-100,0" size="630,28" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/logo.png" alphatest="blend" zPosition="1" />\n\t\t\t\t<ePixmap position="9,37" size="512,1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/seperator.png" alphatest="off" zPosition="1" />\n\t\t\t\t<widget name="folderlist" position="9,38" size="512,125" itemHeight="25" scrollbarMode="showOnDemand" zPosition="1" />\n\t\t\t\t<ePixmap position="9,164" size="512,1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/seperator.png" alphatest="off" zPosition="1" />\n\t\t\t\t<eLabel position="150,173" size="125,20" font="Regular;18" halign="left" text="Save" transparent="1" zPosition="1" />\n\t\t\t\t<eLabel position="365,173" size="125,20" font="Regular;18" halign="left" text="Cancel" transparent="1" zPosition="1" />\n\t\t\t\t<ePixmap position="125,174" size="18,18" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/green.png" alphatest="blend" zPosition="1" />\n\t\t\t\t<ePixmap position="340,174" size="18,18" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/red.png" alphatest="blend" zPosition="1" />\n\t\t\t\t<widget name="plugin" position="9,203" size="512,288" alphatest="blend" zPosition="1" />\n\t\t\t</screen>'

    def __init__(self, session, folder):
        """Build the picker rooted at *folder*."""
        Screen.__init__(self, session)
        self['plugin'] = Pixmap()
        # System directories that make no sense as movie folders.
        noFolder = ['/bin',
                    '/boot',
                    '/dev',
                    '/etc',
                    '/lib',
                    '/proc',
                    '/sbin',
                    '/sys']
        self['folderlist'] = FileList(folder, showDirectories=True, showFiles=False, inhibitDirs=noFolder)
        self['actions'] = ActionMap(['OkCancelActions', 'DirectionActions', 'ColorActions'], {'ok': self.ok,
                                                                                              'cancel': self.cancel,
                                                                                              'right': self.right,
                                                                                              'left': self.left,
                                                                                              'down': self.down,
                                                                                              'up': self.up,
                                                                                              'red': self.cancel,
                                                                                              'green': self.green}, -1)
        self.onLayoutFinish.append(self.pluginPic)

    def pluginPic(self):
        """Display the preview image matching the configured browser style."""
        png = '/usr/lib/enigma2/python/Plugins/Extensions/MovieBrowser/pic/' + config.plugins.moviebrowser.style.value + '.png'
        if fileExists(png):
            PNG = loadPic(png, 512, 288, 3, 0, 0, 1)
            if PNG != None:
                self['plugin'].instance.setPixmap(PNG)

    def ok(self):
        # OK descends into the highlighted directory; selection is via green.
        if self['folderlist'].canDescent():
            self['folderlist'].descent()

    def right(self):
        self['folderlist'].pageDown()

    def left(self):
        self['folderlist'].pageUp()

    def down(self):
        self['folderlist'].down()

    def up(self):
        self['folderlist'].up()

    def green(self):
        # Return the currently selected directory path to the caller.
        self.close(self['folderlist'].getSelection()[0])

    def cancel(self):
        self.close(None)
def main(session, **kwargs):
    """Plugin entry point: open the movie browser in the configured style."""
    style = config.plugins.moviebrowser.style.value
    flt = config.plugins.moviebrowser.filter.value
    if style == 'backdrop':
        session.open(movieBrowserBackdrop, 0, flt, flt)
    elif style == 'posterwall':
        session.open(movieBrowserPosterwall, 0, flt, flt)
def menu(menuid, **kwargs):
    """Main-menu hook: contribute one entry when the main menu is built."""
    if menuid != 'mainmenu':
        return []
    # (text, callable, entry id, weight)
    return [(_('Movie Browser'),
             main,
             'moviebrowser',
             42)]
def Plugins(**kwargs):
    """Enigma2 plugin descriptor hook; this build registers no descriptors here."""
    return list()
AOSP-S4-KK/platform_external_chromium_org | chrome/common/extensions/docs/server2/api_data_source_test.py | 23 | 10290 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from api_data_source import (_JSCModel,
_FormatValue,
_GetEventByNameFromEvents)
from branch_utility import ChannelInfo
from extensions_paths import EXTENSIONS
from file_system import FileNotFoundError
from future import Future
from object_store_creator import ObjectStoreCreator
from reference_resolver import ReferenceResolver
from server_instance import ServerInstance
from test_data.canned_data import (CANNED_API_FILE_SYSTEM_DATA, CANNED_BRANCHES)
from test_data.api_data_source.canned_trunk_fs import CANNED_TRUNK_FS_DATA
from test_file_system import TestFileSystem
from third_party.json_schema_compiler.memoize import memoize
def _MakeLink(href, text):
return '<a href="%s">%s</a>' % (href, text)
def _GetType(dict_, name):
for type_ in dict_['types']:
if type_['name'] == name:
return type_
class _FakeAvailabilityFinder(object):
    """Stub availability finder: reports every API as stable since version 5."""

    def GetApiAvailability(self, version):
        # ChannelInfo(channel, branch, version) -- constant for all inputs.
        return ChannelInfo('stable', '396', 5)
class _FakeHostFileSystemProvider(object):
    """Serves TestFileSystem objects built from canned per-branch data."""

    def __init__(self, file_system_data):
        # Mapping: branch name (string) -> file system content dict.
        self._file_system_data = file_system_data

    def GetTrunk(self):
        return self.GetBranch('trunk')

    @memoize
    def GetBranch(self, branch):
        # Memoized so repeated lookups share one TestFileSystem per branch.
        return TestFileSystem(self._file_system_data[str(branch)])
class _FakeSamplesDataSource(object):
    """Stub samples data source whose Create() yields no samples."""

    def Create(self, request):
        return {}
# Sad irony :(
class _FakeAPIDataSource(object):
def __init__(self, json_data):
self._json = json_data
def Create(self, *args, **kwargs):
return self
def get(self, key, disable_refs=False):
if key not in self._json:
raise FileNotFoundError(key)
return self._json[key]
class _FakeAPIModels(object):
def __init__(self, names):
self._names = names
def GetNames(self):
return self._names
class _FakeTemplateCache(object):
    """Template cache stub: wraps a predictable marker string in a Future."""

    def GetFromFile(self, key):
        template_text = 'handlebar %s' % key
        return Future(value=template_text)
class APIDataSourceTest(unittest.TestCase):
    """Tests for _JSCModel and the helper functions in api_data_source.

    _JSCModel's positional signature (as used by every live test below) is:
    (api_name, api_models, ref_resolver, disable_refs, availability_finder,
     json_cache, template_cache, add_rules_schema_loader).
    """

    def setUp(self):
        self._base_path = os.path.join(sys.path[0], 'test_data', 'test_json')
        server_instance = ServerInstance.ForTest(
            TestFileSystem(CANNED_TRUNK_FS_DATA, relative_to=EXTENSIONS))
        self._json_cache = server_instance.compiled_fs_factory.ForJson(
            server_instance.host_file_system_provider.GetTrunk())
        self._api_models = server_instance.api_models
        # Used for testGetApiAvailability() so that valid-ish data is processed.
        server_instance = ServerInstance.ForTest(
            file_system_provider=_FakeHostFileSystemProvider(
                CANNED_API_FILE_SYSTEM_DATA))
        self._avail_api_models = server_instance.api_models
        self._avail_json_cache = server_instance.compiled_fs_factory.ForJson(
            server_instance.host_file_system_provider.GetTrunk())
        self._avail_finder = server_instance.availability_finder

    def _ReadLocalFile(self, filename):
        """Return the raw contents of *filename* under the test_json directory."""
        with open(os.path.join(self._base_path, filename), 'r') as f:
            return f.read()

    def _CreateRefResolver(self, filename):
        """Build a ReferenceResolver whose data source is canned JSON."""
        test_data = self._LoadJSON(filename)
        return ReferenceResolver.Factory(_FakeAPIDataSource(test_data),
                                         _FakeAPIModels(test_data),
                                         ObjectStoreCreator.ForTest()).Create()

    def _LoadJSON(self, filename):
        return json.loads(self._ReadLocalFile(filename))

    def testCreateId(self):
        """Types, properties, functions and events get stable anchor ids."""
        dict_ = _JSCModel('tester',
                          self._api_models,
                          self._CreateRefResolver('test_file_data_source.json'),
                          False,
                          _FakeAvailabilityFinder(),
                          self._json_cache,
                          _FakeTemplateCache(),
                          None).ToDict()
        self.assertEquals('type-TypeA', dict_['types'][0]['id'])
        self.assertEquals('property-TypeA-b',
                          dict_['types'][0]['properties'][0]['id'])
        self.assertEquals('method-get', dict_['functions'][0]['id'])
        self.assertEquals('event-EventA', dict_['events'][0]['id'])

    # TODO(kalman): re-enable this when we have a rebase option.
    def DISABLED_testToDict(self):
        expected_json = self._LoadJSON('expected_tester.json')
        # Fixed: the ref-resolver and disable_refs arguments were swapped
        # relative to every live call site (see testCreateId et al.).
        dict_ = _JSCModel('tester',
                          self._api_models,
                          self._CreateRefResolver('test_file_data_source.json'),
                          False,
                          _FakeAvailabilityFinder(),
                          self._json_cache,
                          _FakeTemplateCache(),
                          None).ToDict()
        self.assertEquals(expected_json, dict_)

    def testFormatValue(self):
        """_FormatValue inserts thousands separators."""
        self.assertEquals('1,234,567', _FormatValue(1234567))
        self.assertEquals('67', _FormatValue(67))
        self.assertEquals('234,567', _FormatValue(234567))

    def testFormatDescription(self):
        """$ref-style references in descriptions become HTML links."""
        dict_ = _JSCModel('ref_test',
                          self._api_models,
                          self._CreateRefResolver('ref_test_data_source.json'),
                          False,
                          _FakeAvailabilityFinder(),
                          self._json_cache,
                          _FakeTemplateCache(),
                          None).ToDict()
        self.assertEquals(_MakeLink('ref_test.html#type-type2', 'type2'),
                          _GetType(dict_, 'type1')['description'])
        self.assertEquals(
            'A %s, or %s' % (_MakeLink('ref_test.html#type-type3', 'type3'),
                             _MakeLink('ref_test.html#type-type2', 'type2')),
            _GetType(dict_, 'type2')['description'])
        self.assertEquals(
            '%s != %s' % (_MakeLink('other.html#type-type2', 'other.type2'),
                          _MakeLink('ref_test.html#type-type2', 'type2')),
            _GetType(dict_, 'type3')['description'])

    def testGetApiAvailability(self):
        """Availability is computed from the canned branch data per API."""
        api_availabilities = {
            'bluetooth': ChannelInfo('dev', CANNED_BRANCHES[28], 28),
            'contextMenus': ChannelInfo('trunk', CANNED_BRANCHES['trunk'], 'trunk'),
            'jsonStableAPI': ChannelInfo('stable', CANNED_BRANCHES[20], 20),
            'idle': ChannelInfo('stable', CANNED_BRANCHES[5], 5),
            'input.ime': ChannelInfo('stable', CANNED_BRANCHES[18], 18),
            'tabs': ChannelInfo('stable', CANNED_BRANCHES[18], 18)
        }
        for api_name, availability in api_availabilities.iteritems():
            # disable_refs=True: no ref resolver is needed (or supplied).
            model = _JSCModel(api_name,
                              self._avail_api_models,
                              None,
                              True,
                              self._avail_finder,
                              self._avail_json_cache,
                              _FakeTemplateCache(),
                              None)
            self.assertEquals(availability, model._GetApiAvailability())

    def testGetIntroList(self):
        """The intro table contains description/availability/permissions/etc."""
        model = _JSCModel('tester',
                          self._api_models,
                          self._CreateRefResolver('test_file_data_source.json'),
                          False,
                          _FakeAvailabilityFinder(),
                          self._json_cache,
                          _FakeTemplateCache(),
                          None)
        expected_list = [
            { 'title': 'Description',
              'content': [
                { 'text': 'a test api' }
              ]
            },
            { 'title': 'Availability',
              'content': [
                { 'partial': 'handlebar chrome/common/extensions/docs/' +
                             'templates/private/intro_tables/stable_message.html',
                  'version': 5
                }
              ]
            },
            { 'title': 'Permissions',
              'content': [
                { 'class': 'override',
                  'text': '"tester"'
                },
                { 'text': 'is an API for testing things.' }
              ]
            },
            { 'title': 'Manifest',
              'content': [
                { 'class': 'code',
                  'text': '"tester": {...}'
                }
              ]
            },
            { 'title': 'Learn More',
              'content': [
                { 'link': 'https://tester.test.com/welcome.html',
                  'text': 'Welcome!'
                }
              ]
            }
          ]
        self.assertEquals(model._GetIntroTableList(), expected_list)

    def testGetEventByNameFromEvents(self):
        events = {}
        # Missing 'types' completely.
        self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)

        events['types'] = []
        # No type 'Event' defined.
        self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)

        events['types'].append({ 'name': 'Event',
                                 'functions': []})
        add_rules = { "name": "addRules" }
        events['types'][0]['functions'].append(add_rules)
        self.assertEqual(add_rules,
                         _GetEventByNameFromEvents(events)['addRules'])

        events['types'][0]['functions'].append(add_rules)
        # Duplicates are an error.
        self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)

    def _FakeLoadAddRulesSchema(self):
        """Stand-in for the lazy addRules schema loader."""
        events = self._LoadJSON('add_rules_def_test.json')
        return _GetEventByNameFromEvents(events)

    def testAddRules(self):
        dict_ = _JSCModel('add_rules_tester',
                          self._api_models,
                          self._CreateRefResolver('test_file_data_source.json'),
                          False,
                          _FakeAvailabilityFinder(),
                          self._json_cache,
                          _FakeTemplateCache(),
                          self._FakeLoadAddRulesSchema).ToDict()
        self.assertEquals('add_rules_tester', dict_['name'])
        # Check that the first event has the addRulesFunction defined.
        self.assertEquals('rules', dict_['events'][0]['name'])
        self.assertEquals('notable_name_to_check_for',
                          dict_['events'][0]['byName']['addRules'][
                              'parameters'][0]['name'])
        # Check that the second event has addListener defined.
        # (Duplicated re-assertions of the name fields were removed.)
        self.assertEquals('noRules', dict_['events'][1]['name'])
        self.assertEquals('callback',
                          dict_['events'][0]['byName']['addListener'][
                              'parameters'][0]['name'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
jhawkesworth/ansible | lib/ansible/plugins/terminal/slxos.py | 177 | 1962 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
    """Terminal plugin for Extreme Networks SLX-OS devices.

    Supplies the prompt and error patterns the network_cli connection uses
    to delimit command output, and disables paging when the shell opens.
    """

    # Matches the device prompt, e.g. 'host(config)# ' or 'host> '.
    terminal_stdout_re = [
        re.compile(br"([\r\n]|(\x1b\[\?7h))[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
    ]

    # Patterns indicating the device rejected or failed a command.
    terminal_stderr_re = [
        re.compile(br"% ?Error"),
        # re.compile(br"^% \w+", re.M),
        re.compile(br"% ?Bad secret"),
        re.compile(br"[\r\n%] Bad passwords"),
        re.compile(br"invalid input", re.I),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"[^\r\n]+ not found"),
        re.compile(br"'[^']' +returned error code: ?\d+"),
        re.compile(br"Bad mask", re.I),
        re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
        re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
        re.compile(br"syntax error: unknown argument.", re.I)
    ]

    def on_open_shell(self):
        """Disable output paging so long command output is not truncated."""
        try:
            self._exec_cli_command(u'terminal length 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
| gpl-3.0 |
LaoZhongGu/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_memfunctions.py | 50 | 3265 | import sys
import unittest
from ctypes import *
class MemFunctionsTest(unittest.TestCase):
    """Exercise ctypes' raw-memory helpers: memmove, memset, cast, string_at."""

##    def test_overflow(self):
##        # string_at and wstring_at must use the Python calling
##        # convention (which acquires the GIL and checks the Python
##        # error flag).  Provoke an error and catch it; see also issue
##        # #3554: <http://bugs.python.org/issue3554>
##        self.assertRaises((OverflowError, MemoryError, SystemError),
##                          lambda: wstring_at(u"foo", sys.maxint - 1))
##        self.assertRaises((OverflowError, MemoryError, SystemError),
##                          lambda: string_at("foo", sys.maxint - 1))

    def test_memmove(self):
        """memmove copies bytes into a buffer and returns the destination."""
        # large buffers apparently increase the chance that the memory
        # is allocated in high address space.
        a = create_string_buffer(1000000)
        p = b"Hello, World"
        result = memmove(a, p, len(p))
        self.assertEqual(a.value, b"Hello, World")

        self.assertEqual(string_at(result), b"Hello, World")
        self.assertEqual(string_at(result, 5), b"Hello")
        self.assertEqual(string_at(result, 16), b"Hello, World\0\0\0\0")
        self.assertEqual(string_at(result, 0), b"")

    def test_memset(self):
        """memset fills a region with one byte value."""
        a = create_string_buffer(1000000)
        result = memset(a, ord('x'), 16)
        self.assertEqual(a.value, b"xxxxxxxxxxxxxxxx")

        self.assertEqual(string_at(result), b"xxxxxxxxxxxxxxxx")
        self.assertEqual(string_at(a), b"xxxxxxxxxxxxxxxx")
        self.assertEqual(string_at(a, 20), b"xxxxxxxxxxxxxxxx\0\0\0\0")

    def test_cast(self):
        """cast() reinterprets a byte array as char pointer / byte pointer."""
        a = (c_ubyte * 32)(*map(ord, "abcdef"))
        self.assertEqual(cast(a, c_char_p).value, b"abcdef")
        self.assertEqual(cast(a, POINTER(c_byte))[:7],
                         [97, 98, 99, 100, 101, 102, 0])
        self.assertEqual(cast(a, POINTER(c_byte))[:7:],
                         [97, 98, 99, 100, 101, 102, 0])
        self.assertEqual(cast(a, POINTER(c_byte))[6:-1:-1],
                         [0, 102, 101, 100, 99, 98, 97])
        self.assertEqual(cast(a, POINTER(c_byte))[:7:2],
                         [97, 99, 101, 0])
        self.assertEqual(cast(a, POINTER(c_byte))[:7:7],
                         [97])

    def test_string_at(self):
        s = string_at(b"foo bar")
        # XXX The following may be wrong, depending on how Python
        # manages string instances
        self.assertEqual(2, sys.getrefcount(s))
        self.assertTrue(s, "foo bar")

        self.assertEqual(string_at(b"foo bar", 7), b"foo bar")
        self.assertEqual(string_at(b"foo bar", 3), b"foo")

    # wstring_at only exists on builds where ctypes has wide-char support,
    # so the test is defined conditionally at class-body execution time.
    try:
        create_unicode_buffer
    except NameError:
        pass
    else:
        def test_wstring_at(self):
            """memmove round-trips wide-character data as well."""
            p = create_unicode_buffer("Hello, World")
            a = create_unicode_buffer(1000000)
            result = memmove(a, p, len(p) * sizeof(c_wchar))
            self.assertEqual(a.value, "Hello, World")

            self.assertEqual(wstring_at(a), "Hello, World")
            self.assertEqual(wstring_at(a, 5), "Hello")
            self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0")
            self.assertEqual(wstring_at(a, 0), "")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| lgpl-3.0 |
kawamon/hue | desktop/core/ext-py/eventlet-0.24.1/tests/manual/greenio_memtest.py | 10 | 1872 | import eventlet
from eventlet import greenio
import os
__test__ = False
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0 * 1024.0,
'KB': 1024.0, 'MB': 1024.0 * 1024.0}
def _VmB(VmKey):
'''Private.
'''
global _proc_status, _scale
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except:
return 0.0 # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
    '''Return total virtual memory size (VmSize) in bytes, minus *since*.
    '''
    return _VmB('VmSize:') - since
def resident(since=0.0):
    '''Return resident set size (VmRSS) in bytes, minus *since*.
    '''
    return _VmB('VmRSS:') - since
def stacksize(since=0.0):
    '''Return stack size (VmStk) in bytes, minus *since*.
    '''
    return _VmB('VmStk:') - since
def test_pipe_writes_large_messages():
    """Round-trip 65 chunks of 1 KiB each through a green pipe.

    A writer greenthread pushes one large message; the reader pulls it back
    1 KiB at a time and reports any chunk that does not match.
    """
    r, w = os.pipe()
    r = greenio.GreenPipe(r)
    w = greenio.GreenPipe(w, 'w')
    # NOTE(review): b"".join over str chunks (1024 * chr(i)) only works on
    # Python 2, where b"" is str; on Python 3 this raises TypeError -- confirm.
    large_message = b"".join([1024 * chr(i) for i in range(65)])

    def writer():
        w.write(large_message)
        w.close()

    gt = eventlet.spawn(writer)

    for i in range(65):
        buf = r.read(1024)
        expected = 1024 * chr(i)
        if buf != expected:
            print(
                "expected=%r..%r, found=%r..%r iter=%d"
                % (expected[:4], expected[-4:], buf[:4], buf[-4:], i))
    gt.wait()
if __name__ == "__main__":
    # Run the pipe test forever, printing process memory every 10 iterations;
    # a leak shows up as steadily growing VmSize/VmRSS numbers.
    _iter = 1
    while True:
        test_pipe_writes_large_messages()
        _iter += 1
        if _iter % 10 == 0:
            print("_iter = %d, VmSize: %d, VmRSS = %d, VmStk = %d" %
                  (_iter, memory(), resident(), stacksize()))
unseenlaser/python-for-android | python3-alpha/python3-src/Lib/email/test/test_email_codecs.py | 50 | 3394 | # Copyright (C) 2002-2006 Python Software Foundation
# Contact: [email protected]
# email package unit tests for (optional) Asian codecs
import unittest
from test.support import run_unittest
from email.test.test_email import TestEmailBase
from email.charset import Charset
from email.header import Header, decode_header
from email.message import Message
# We're compatible with Python 2.3, but it doesn't have the built-in Asian
# codecs, so we have to skip all these tests.
try:
    str(b'foo', 'euc-jp')
except LookupError:
    # The optional Asian codecs are not built in; skip the whole module.
    raise unittest.SkipTest
class TestEmailAsianCodecs(TestEmailBase):
    """Header encoding/decoding and payload round-trips with Asian codecs."""

    def test_japanese_codecs(self):
        """Mixed euc-jp / iso-8859-1 header chunks encode and decode correctly."""
        eq = self.ndiffAssertEqual
        jcode = "euc-jp"
        gcode = "iso-8859-1"
        j = Charset(jcode)
        g = Charset(gcode)
        h = Header("Hello World!")
        jhello = str(b'\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc'
                     b'\xa5\xeb\xa5\xc9\xa1\xaa', jcode)
        ghello = str(b'Gr\xfc\xdf Gott!', gcode)
        h.append(jhello, j)
        h.append(ghello, g)
        # BAW: This used to -- and maybe should -- fold the two iso-8859-1
        # chunks into a single encoded word.  However it doesn't violate the
        # standard to have them as two encoded chunks and maybe it's
        # reasonable <wink> for each .append() call to result in a separate
        # encoded word.
        eq(h.encode(), """\
Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?=
 =?iso-8859-1?q?Gr=FC=DF_Gott!?=""")
        eq(decode_header(h.encode()),
           [(b'Hello World!', None),
            (b'\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'),
            (b'Gr\xfc\xdf Gott!', gcode)])
        subject_bytes = (b'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5'
            b'\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2'
            b'\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3'
            b'\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9')
        subject = str(subject_bytes, jcode)
        h = Header(subject, j, header_name="Subject")
        # test a very long header
        enc = h.encode()
        # TK: splitting point may differ by codec design and/or Header encoding
        eq(enc, """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?=
 =?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""")
        # TK: full decode comparison
        eq(str(h).encode(jcode), subject_bytes)

    def test_payload_encoding_utf8(self):
        """A euc-jp string survives a utf-8 payload round-trip."""
        jhello = str(b'\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc'
                     b'\xa5\xeb\xa5\xc9\xa1\xaa', 'euc-jp')
        msg = Message()
        msg.set_payload(jhello, 'utf-8')
        ustr = msg.get_payload(decode=True).decode(msg.get_content_charset())
        self.assertEqual(jhello, ustr)

    def test_payload_encoding(self):
        """A euc-jp string survives a euc-jp payload round-trip."""
        jcode = 'euc-jp'
        jhello = str(b'\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc'
                     b'\xa5\xeb\xa5\xc9\xa1\xaa', jcode)
        msg = Message()
        msg.set_payload(jhello, jcode)
        ustr = msg.get_payload(decode=True).decode(msg.get_content_charset())
        self.assertEqual(jhello, ustr)
def suite():
    """Build a TestSuite containing every test in TestEmailAsianCodecs."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(TestEmailAsianCodecs))
    return tests
def test_main():
    # Entry point used by the regression-test driver.
    run_unittest(TestEmailAsianCodecs)


if __name__ == '__main__':
    # Running directly uses the suite() factory above.
    unittest.main(defaultTest='suite')
| apache-2.0 |
ryra/Personal | 3.secsif/All/cryptopals-solutions-master/set2/12/AES_128.py | 4 | 1929 | #! /usr/bin/env python
from Crypto.Cipher import AES
from binascii import a2b_base64
def pkcs_7_pad(data, final_len=None):
    """Pad *data* with PKCS#7 padding up to *final_len* bytes.

    When *final_len* is None, pad to the next multiple of 16; data whose
    length is already a multiple of 16 gains a full 16-byte padding block,
    as PKCS#7 requires.  Each padding byte's value equals the pad length.
    """
    if final_len is None:
        # '//' keeps this an int under both classic and true division
        # (the old '/' produced a float on Python 3, breaking chr()*n).
        final_len = (len(data) // 16 + 1) * 16
    padding_len = final_len - len(data)
    return data + chr(padding_len) * padding_len
def pkcs_7_unpad(data):
    """Strip PKCS#7 padding; return *data* unchanged when the padding is invalid."""
    pad = ord(data[-1])
    # Every byte of the supposed padding must equal the pad length.
    for pos in range(len(data) - pad, len(data)):
        if ord(data[pos]) != pad:
            return data
    return data[:-pad]
def AES_128_ECB_encrypt(data, key, pad = False):
    """ECB-encrypt *data* with *key*; PKCS#7-pad first when *pad* is True.

    Without padding, len(data) must be a multiple of 16.  Key is presumably
    16 bytes (AES-128) -- enforced by the cipher, not here.
    """
    cipher = AES.new(key, AES.MODE_ECB)
    if pad:
        data = pkcs_7_pad(data)
    return cipher.encrypt(data)
def AES_128_ECB_decrypt(data, key, unpad = False):
    """ECB-decrypt *data* with *key*; strip PKCS#7 padding when *unpad* is True."""
    cipher = AES.new(key, AES.MODE_ECB)
    decr = cipher.decrypt(data)
    if unpad:
        decr = pkcs_7_unpad(decr)
    return decr
def xor_data(A, B):
    """Return the byte-wise XOR of A with the first len(A) characters of B."""
    return ''.join(chr(ord(ch) ^ ord(B[idx])) for idx, ch in enumerate(A))
def AES_128_CBC_encrypt(data, key, iv):
    """CBC-mode encrypt built from the ECB primitive.

    The data is PKCS#7-padded, then each plaintext block is XORed with the
    previous ciphertext block (the IV for the first block) before being
    ECB-encrypted.
    """
    data = pkcs_7_pad(data)
    # NOTE(review): '/' relies on Python 2 integer division; use // on Py3.
    block_count = len(data) / 16
    encrypted_data = ''
    prev_block = iv
    for b in range(block_count):
        cur_block = data[b*16:(b+1)*16]
        encrypted_block = AES_128_ECB_encrypt(xor_data(cur_block, prev_block), key)
        encrypted_data += encrypted_block
        prev_block = encrypted_block
    return encrypted_data
def AES_128_CBC_decrypt(data, key, iv):
    """Invert AES_128_CBC_encrypt: ECB-decrypt each block, XOR with the
    previous ciphertext block (IV for the first), then strip the padding."""
    # NOTE(review): '/' relies on Python 2 integer division; use // on Py3.
    block_count = len(data) / 16
    decrypted_data = ''
    prev_block = iv
    for b in range(block_count):
        cur_block = data[b*16:(b+1)*16]
        decrypted_block = AES_128_ECB_decrypt(cur_block, key)
        decrypted_data += xor_data(decrypted_block, prev_block)
        prev_block = cur_block
    return pkcs_7_unpad(decrypted_data)
if __name__ == '__main__':
    # Round-trip self-test for the CBC implementation.
    text = 'abcdefghijklmnopqrstuvwxyz!'
    key = 'abcdef1234567890'
    # NOTE(review): this iv is 17 characters long; xor_data only consumes the
    # first 16, but a proper AES-CBC IV is exactly 16 bytes -- confirm intent.
    iv = '128348347dhrughdf'
    if AES_128_CBC_decrypt(AES_128_CBC_encrypt(text, key, iv), key, iv) == text:
        print "[+] CBC decrypt(encrypt(text))==text test passed"
    else:
        print "[-] CBC test failed"
| agpl-3.0 |
sathnaga/avocado-vt | selftests/unit/test_installer.py | 8 | 2175 | #!/usr/bin/python
import unittest
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
    # Running from a source checkout: make the in-tree packages importable.
    sys.path.append(basedir)
from virttest import installer
from virttest import cartesian_config
class installer_test(unittest.TestCase):
    """Unit tests for virttest.installer's registry and factory helpers."""

    def setUp(self):
        self.registry = installer.InstallerRegistry()

    def test_register_get_installer(self):
        """A class registered for a virt type is returned for that same pair."""
        install_mode = 'custom_install_mode'
        virt_type = 'custom_virt_type'

        class CustomVirtInstaller:
            pass

        self.registry.register(install_mode, CustomVirtInstaller, virt_type)
        klass = self.registry.get_installer(install_mode, virt_type)
        self.assertTrue(klass is CustomVirtInstaller)

    def test_register_get_installer_default(self):
        """Registering without a virt type makes the class the mode default."""
        install_mode = 'base_install_mode'

        class BaseVirtInstaller:
            pass

        self.registry.register(install_mode, BaseVirtInstaller)
        klass = self.registry.get_installer(install_mode,
                                            get_default_virt=True)
        self.assertTrue(klass is BaseVirtInstaller)

        # Passing virt=None explicitly must fall back to the default too.
        klass = self.registry.get_installer(install_mode,
                                            virt=None,
                                            get_default_virt=True)
        self.assertTrue(klass is BaseVirtInstaller)

    def test_make_installer(self):
        """make_installer() instantiates the class registered for mode_vmtype."""
        config = """install_mode = test_install_mode
vm_type = test"""

        class Installer:
            def __init__(self, mode, name, test, params):
                pass

        installer.INSTALLER_REGISTRY.register('test_install_mode',
                                              Installer,
                                              'test')

        config_parser = cartesian_config.Parser()
        config_parser.parse_string(config)
        params = next(config_parser.get_dicts())
        instance = installer.make_installer("test_install_mode_test", params)
        self.assertTrue(isinstance(instance, Installer))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| gpl-2.0 |
FedoraScientific/salome-smesh | src/SMESH_SWIG/ex11_grid3partition.py | 1 | 3248 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# =======================================
#
import salome
salome.salome_init()

import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)

import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)

# Geometry
# ========

# grid compound of 3 x 3 elements
# an element is compound of 3 concentric cylinders
# an element is centered in a square of the grid
# the smaller cylinder is a hole

# prism the grid, and mesh it in hexahedral way

# Values
# ------

g_x = 0
g_y = 0
g_z = 0

g_arete = 50      # half edge length of an element's bounding square
g_hauteur = 30    # half height of an element

g_rayon1 = 20     # hole radius
g_rayon2 = 30     # middle cylinder radius
g_rayon3 = 40     # outer cylinder radius

g_grid = 3        # grid is g_grid x g_grid elements

g_trim = 1000     # trim size for the partition planes

# Element
# -------

e_boite = geompy.MakeBox(g_x-g_arete, g_y-g_hauteur, g_z-g_arete, g_x+g_arete, g_y+g_hauteur, g_z+g_arete)

e_hauteur = 2*g_hauteur
e_centre = geompy.MakeVertex(g_x, g_y-g_hauteur, g_z)
e_dir = geompy.MakeVectorDXDYDZ(0, 1, 0)

# Nested cuts yield three concentric shells; the innermost cylinder is a hole.
e_cyl1 = geompy.MakeCylinder(e_centre, e_dir, g_rayon3, e_hauteur)
e_blo1 = geompy.MakeCut(e_boite, e_cyl1)

e_cyl2 = geompy.MakeCylinder(e_centre, e_dir, g_rayon2, e_hauteur)
e_blo2 = geompy.MakeCut(e_cyl1, e_cyl2)

e_cyl3 = geompy.MakeCylinder(e_centre, e_dir, g_rayon1, e_hauteur)
e_blo3 = geompy.MakeCut(e_cyl2, e_cyl3)

# Partition and repair
# --------------------

# Two diagonal planes cut the shells into blocks suitable for hexa meshing.
p_tools = []
p_tools.append(geompy.MakePlane(e_centre, geompy.MakeVectorDXDYDZ( 1, 0, 1), g_trim))
p_tools.append(geompy.MakePlane(e_centre, geompy.MakeVectorDXDYDZ(-1, 0, 1), g_trim))

p_part = geompy.MakePartition([e_blo1, e_blo2, e_blo3], p_tools, [], [], geompy.ShapeType["SOLID"])

p_element = geompy.RemoveExtraEdges(p_part, doUnionFaces=True)

# Grid and glue
# -------------

# Translate the element over a g_grid x g_grid lattice, then glue faces
# so adjacent elements share their boundary.
grid = geompy.MakeMultiTranslation2D(p_element, geompy.MakeVectorDXDYDZ(1, 0, 0), 2*g_arete, g_grid, geompy.MakeVectorDXDYDZ(0, 0, 1), 2*g_arete, g_grid)

piece = geompy.MakeGlueFaces(grid, 1e-5)

# Add in study
# ------------

piece_id = geompy.addToStudy(piece, "ex11_grid3partition")

# Meshing
# =======

# Create a hexahedral mesh
# ------------------------

hexa = smesh.Mesh(piece, "ex11_grid3partition:hexa")

algo = hexa.Segment()
algo.NumberOfSegments(3)

hexa.Quadrangle()
hexa.Hexahedron()

# Mesh calculus
# -------------

hexa.Compute()
| lgpl-2.1 |
neumerance/deploy | openstack_dashboard/dashboards/project/firewalls/views.py | 4 | 10365 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang, Big Switch Networks
import logging
import re
from django.core.urlresolvers import reverse_lazy # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls \
import forms as fw_forms
from openstack_dashboard.dashboards.project.firewalls \
import tabs as fw_tabs
from openstack_dashboard.dashboards.project.firewalls \
import workflows as fw_workflows
# Re-export forms/tabs/workflows symbols under their historical module-level
# names so existing imports of this module keep working.
InsertRuleToPolicy = fw_forms.InsertRuleToPolicy
RemoveRuleFromPolicy = fw_forms.RemoveRuleFromPolicy
UpdateFirewall = fw_forms.UpdateFirewall
UpdatePolicy = fw_forms.UpdatePolicy
UpdateRule = fw_forms.UpdateRule

FirewallDetailsTabs = fw_tabs.FirewallDetailsTabs
FirewallTabs = fw_tabs.FirewallTabs
PolicyDetailsTabs = fw_tabs.PolicyDetailsTabs
RuleDetailsTabs = fw_tabs.RuleDetailsTabs

AddFirewall = fw_workflows.AddFirewall
AddPolicy = fw_workflows.AddPolicy
AddRule = fw_workflows.AddRule

LOG = logging.getLogger(__name__)
class IndexView(tabs.TabView):
    """Landing page for the firewalls panel.

    GET renders the tab group; POST services the tables' delete actions
    for rules, policies and firewalls.
    """
    tab_group_class = (FirewallTabs)
    template_name = 'project/firewalls/details_tabs.html'

    def post(self, request, *args, **kwargs):
        """Delete the selected objects, then re-render the tabs.

        The table action name encodes the object type ('...deleterule',
        '...deletepolicy', '...deletefirewall'); row-level buttons carry
        the object id in the action instead of posting 'object_ids'.
        """
        obj_ids = request.POST.getlist('object_ids')
        action = request.POST['action']
        obj_type = re.search('.delete([a-z]+)', action).group(1)
        if not obj_ids:
            # Row-level delete: the object id is the trailing token.
            obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
        # One code path for all three object types (was three copy-pasted
        # loops); user-visible messages are unchanged.
        api_delete = {'rule': api.fwaas.rule_delete,
                      'policy': api.fwaas.policy_delete,
                      'firewall': api.fwaas.firewall_delete}.get(obj_type)
        if api_delete is not None:
            for obj_id in obj_ids:
                try:
                    api_delete(request, obj_id)
                    messages.success(request,
                                     'Deleted %s %s' % (obj_type, obj_id))
                except Exception as e:
                    exceptions.handle(
                        request,
                        _('Unable to delete %s. %s' % (obj_type, e)))
        return self.get(request, *args, **kwargs)
class AddRuleView(workflows.WorkflowView):
    """Workflow view for creating a firewall rule."""
    workflow_class = AddRule
    template_name = "project/firewalls/addrule.html"
class AddPolicyView(workflows.WorkflowView):
    """Workflow view for creating a firewall policy."""
    workflow_class = AddPolicy
    template_name = "project/firewalls/addpolicy.html"
class AddFirewallView(workflows.WorkflowView):
    """Workflow view for creating a firewall."""
    workflow_class = AddFirewall
    template_name = "project/firewalls/addfirewall.html"
class RuleDetailsView(tabs.TabView):
    """Detail page for a single firewall rule."""
    tab_group_class = (RuleDetailsTabs)
    template_name = 'project/firewalls/details_tabs.html'
class PolicyDetailsView(tabs.TabView):
    """Detail page for a single firewall policy."""
    tab_group_class = (PolicyDetailsTabs)
    template_name = 'project/firewalls/details_tabs.html'
class FirewallDetailsView(tabs.TabView):
    """Detail page for a single firewall."""
    tab_group_class = (FirewallDetailsTabs)
    template_name = 'project/firewalls/details_tabs.html'
class UpdateRuleView(forms.ModalFormView):
    """Modal form for editing an existing firewall rule."""

    form_class = UpdateRule
    template_name = "project/firewalls/updaterule.html"
    context_object_name = 'rule'
    success_url = reverse_lazy("horizon:project:firewalls:index")

    def get_context_data(self, **kwargs):
        """Expose the rule id and, when retrievable, its name to the template."""
        context = super(UpdateRuleView, self).get_context_data(**kwargs)
        context['rule_id'] = self.kwargs['rule_id']
        rule = self._get_object()
        if rule:
            context['name'] = rule.name
        return context

    def _get_object(self, *args, **kwargs):
        """Fetch and memoize the rule; on failure redirect to the index."""
        if hasattr(self, "_object"):
            return self._object
        try:
            self._object = api.fwaas.rule_get(self.request,
                                              self.kwargs['rule_id'])
            self._object.set_id_as_name_if_empty()
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve rule details.'),
                              redirect=self.success_url)
        return self._object

    def get_initial(self):
        """Seed the form with the rule's current attribute values."""
        return self._get_object().get_dict()
class UpdatePolicyView(forms.ModalFormView):
    """Modal form for editing an existing firewall policy."""

    form_class = UpdatePolicy
    template_name = "project/firewalls/updatepolicy.html"
    context_object_name = 'policy'
    success_url = reverse_lazy("horizon:project:firewalls:index")

    def get_context_data(self, **kwargs):
        """Expose the policy id and, when retrievable, its name."""
        context = super(UpdatePolicyView, self).get_context_data(**kwargs)
        context["policy_id"] = self.kwargs['policy_id']
        policy = self._get_object()
        if policy:
            context['name'] = policy.name
        return context

    def _get_object(self, *args, **kwargs):
        """Fetch and memoize the policy; on failure redirect to the index."""
        if hasattr(self, "_object"):
            return self._object
        try:
            self._object = api.fwaas.policy_get(self.request,
                                                self.kwargs['policy_id'])
            self._object.set_id_as_name_if_empty()
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve policy details.'),
                              redirect=self.success_url)
        return self._object

    def get_initial(self):
        """Seed the form with the policy's current attribute values."""
        return self._get_object().get_dict()
class UpdateFirewallView(forms.ModalFormView):
    """Modal form for editing an existing firewall."""

    form_class = UpdateFirewall
    template_name = "project/firewalls/updatefirewall.html"
    context_object_name = 'firewall'
    success_url = reverse_lazy("horizon:project:firewalls:index")

    def get_context_data(self, **kwargs):
        """Expose the firewall id and, when retrievable, its name."""
        context = super(UpdateFirewallView, self).get_context_data(**kwargs)
        context["firewall_id"] = self.kwargs['firewall_id']
        firewall = self._get_object()
        if firewall:
            context['name'] = firewall.name
        return context

    def _get_object(self, *args, **kwargs):
        """Fetch and memoize the firewall; on failure redirect to the index."""
        if hasattr(self, "_object"):
            return self._object
        try:
            self._object = api.fwaas.firewall_get(self.request,
                                                  self.kwargs['firewall_id'])
            self._object.set_id_as_name_if_empty()
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve firewall details.'),
                              redirect=self.success_url)
        return self._object

    def get_initial(self):
        """Seed the form with the firewall's current attribute values."""
        return self._get_object().get_dict()
class InsertRuleToPolicyView(forms.ModalFormView):
    """Modal form for inserting a rule into an existing policy."""

    form_class = InsertRuleToPolicy
    template_name = "project/firewalls/insert_rule_to_policy.html"
    context_object_name = 'policy'
    success_url = reverse_lazy("horizon:project:firewalls:index")

    def get_context_data(self, **kwargs):
        """Expose the policy id and, when retrievable, its name."""
        context = super(InsertRuleToPolicyView,
                        self).get_context_data(**kwargs)
        context["policy_id"] = self.kwargs['policy_id']
        policy = self._get_object()
        if policy:
            context['name'] = policy.name
        return context

    def _get_object(self, *args, **kwargs):
        """Fetch and memoize the policy; on failure redirect to the index."""
        if hasattr(self, "_object"):
            return self._object
        try:
            self._object = api.fwaas.policy_get(self.request,
                                                self.kwargs['policy_id'])
            self._object.set_id_as_name_if_empty()
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve policy details.'),
                              redirect=self.success_url)
        return self._object

    def get_initial(self):
        """Seed the form; the form expects the id under 'policy_id' as well."""
        initial = self._get_object().get_dict()
        initial['policy_id'] = initial['id']
        return initial
class RemoveRuleFromPolicyView(forms.ModalFormView):
    """Modal form for removing a rule from an existing policy."""

    form_class = RemoveRuleFromPolicy
    template_name = "project/firewalls/remove_rule_from_policy.html"
    context_object_name = 'policy'
    success_url = reverse_lazy("horizon:project:firewalls:index")

    def get_context_data(self, **kwargs):
        """Expose the policy id and, when retrievable, its name."""
        context = super(RemoveRuleFromPolicyView,
                        self).get_context_data(**kwargs)
        context["policy_id"] = self.kwargs['policy_id']
        policy = self._get_object()
        if policy:
            context['name'] = policy.name
        return context

    def _get_object(self, *args, **kwargs):
        """Fetch and memoize the policy; on failure redirect to the index."""
        if hasattr(self, "_object"):
            return self._object
        try:
            self._object = api.fwaas.policy_get(self.request,
                                                self.kwargs['policy_id'])
            self._object.set_id_as_name_if_empty()
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve policy details.'),
                              redirect=self.success_url)
        return self._object

    def get_initial(self):
        """Seed the form; the form expects the id under 'policy_id' as well."""
        initial = self._get_object().get_dict()
        initial['policy_id'] = initial['id']
        return initial
| apache-2.0 |
windskyer/nova | nova/api/openstack/compute/legacy_v2/contrib/instance_usage_audit_log.py | 66 | 5751 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import context as nova_context
from nova.i18n import _
from nova import utils
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
authorize = extensions.extension_authorizer('compute',
'instance_usage_audit_log')
class InstanceUsageAuditLogController(object):
    """REST controller exposing instance usage audit task logs."""
    def __init__(self):
        # Host API is used to query task logs and compute services via RPC.
        self.host_api = compute.HostAPI()
    def index(self, req):
        """Return the audit log summary for the most recent audit period."""
        context = req.environ['nova.context']
        authorize(context)
        task_log = self._get_audit_task_logs(context)
        return {'instance_usage_audit_logs': task_log}
    def show(self, req, id):
        """Return the audit log summary for the period ending before ``id``.

        ``id`` is a timestamp string, with or without fractional seconds;
        an unparsable value yields HTTP 400.
        """
        context = req.environ['nova.context']
        authorize(context)
        try:
            # Accept both second- and microsecond-precision timestamps.
            if '.' in id:
                before_date = datetime.datetime.strptime(str(id),
                                    "%Y-%m-%d %H:%M:%S.%f")
            else:
                before_date = datetime.datetime.strptime(str(id),
                                    "%Y-%m-%d %H:%M:%S")
        except ValueError:
            msg = _("Invalid timestamp for date %s") % id
            raise webob.exc.HTTPBadRequest(explanation=msg)
        task_log = self._get_audit_task_logs(context,
                                             before=before_date)
        return {'instance_usage_audit_log': task_log}
    def _get_audit_task_logs(self, context, begin=None, end=None,
                             before=None):
        """Returns a full log for all instance usage audit tasks on all
        computes.

        :param begin: datetime beginning of audit period to get logs for,
            Defaults to the beginning of the most recently completed
            audit period prior to the 'before' date.
        :param end: datetime ending of audit period to get logs for,
            Defaults to the ending of the most recently completed
            audit period prior to the 'before' date.
        :param before: By default we look for the audit period most recently
            completed before this datetime. Has no effect if both begin and
            end are specified.
        """
        # NOTE(alex_xu): back-compatible with db layer hard-code admin
        # permission checks.
        nova_context.require_admin_context(context)
        defbegin, defend = utils.last_completed_audit_period(before=before)
        if begin is None:
            begin = defbegin
        if end is None:
            end = defend
        task_logs = self.host_api.task_log_get_all(context,
                                                   "instance_usage_audit",
                                                   begin, end)
        # We do it this way to include disabled compute services,
        # which can have instances on them. (mdragon)
        filters = {'topic': CONF.compute_topic}
        services = self.host_api.service_get_all(context, filters=filters)
        hosts = set(serv['host'] for serv in services)
        seen_hosts = set()
        done_hosts = set()
        running_hosts = set()
        total_errors = 0
        total_items = 0
        # Aggregate per-host task logs into overall counters.
        for tlog in task_logs:
            seen_hosts.add(tlog['host'])
            if tlog['state'] == "DONE":
                done_hosts.add(tlog['host'])
            if tlog['state'] == "RUNNING":
                running_hosts.add(tlog['host'])
            total_errors += tlog['errors']
            total_items += tlog['task_items']
        log = {tl['host']: dict(state=tl['state'],
                                instances=tl['task_items'],
                                errors=tl['errors'],
                                message=tl['message'])
               for tl in task_logs}
        # Hosts with a compute service but no task log never ran the audit.
        missing_hosts = hosts - seen_hosts
        overall_status = "%s hosts done. %s errors." % (
            'ALL' if len(done_hosts) == len(hosts)
            else "%s of %s" % (len(done_hosts), len(hosts)),
            total_errors)
        return dict(period_beginning=str(begin),
                    period_ending=str(end),
                    num_hosts=len(hosts),
                    num_hosts_done=len(done_hosts),
                    num_hosts_running=len(running_hosts),
                    num_hosts_not_run=len(missing_hosts),
                    hosts_not_run=list(missing_hosts),
                    total_instances=total_items,
                    total_errors=total_errors,
                    overall_status=overall_status,
                    log=log)
class Instance_usage_audit_log(extensions.ExtensionDescriptor):
    """Admin-only Task Log Monitoring."""

    name = "OSInstanceUsageAuditLog"
    alias = "os-instance_usage_audit_log"
    namespace = "http://docs.openstack.org/ext/services/api/v1.1"
    updated = "2012-07-06T01:00:00Z"

    def get_resources(self):
        """Expose the audit-log controller as a single API resource."""
        resource = extensions.ResourceExtension(
            'os-instance_usage_audit_log',
            InstanceUsageAuditLogController())
        return [resource]
| gpl-2.0 |
zamattiac/osf.io | framework/flask/__init__.py | 22 | 2444 | # -*- coding: utf-8 -*-
import os
from flask import (Flask, request, jsonify, render_template, # noqa
render_template_string, Blueprint, send_file, abort, make_response,
redirect as flask_redirect, url_for, send_from_directory, current_app
)
import furl
from website import settings
# Create app
app = Flask(
    __name__,
    static_folder=settings.STATIC_FOLDER,
    static_url_path=settings.STATIC_URL_PATH,
)
# Pull debug mode from settings
app.debug = settings.DEBUG_MODE
# Values read by the Sentry error-reporting client (configured elsewhere).
app.config['SENTRY_TAGS'] = {'App': 'web'}
app.config['SENTRY_RELEASE'] = settings.VERSION
# Set up static routing for addons
# TODO: Handle this in nginx
addon_base_path = os.path.abspath('website/addons')
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
    """Serve a static asset from ``website/addons/<addon>/static``.

    ``send_from_directory`` rejects ``filename`` values that would escape
    the addon's static directory.
    """
    addon_path = os.path.join(addon_base_path, addon, 'static')
    return send_from_directory(addon_path, filename)
def add_handler(app, handler_name, func, key=None):
    """Add handler to Flask application if handler has not already been added.

    Used to avoid attaching the same handlers more than once, e.g. when setting
    up multiple applications during testing.

    :param app: Flask app
    :param handler_name: Name of handler type, e.g. 'before_request'
    :param func: Handler function to attach
    :param key: Blueprint name (``None`` for app-wide handlers)
    """
    handler_adder = getattr(app, handler_name)
    handler_funcs_name = '{0}_funcs'.format(handler_name)
    # Flask keeps registered handlers in e.g. ``app.before_request_funcs``,
    # a dict keyed by blueprint name; only attach if not already present.
    handler_funcs = getattr(app, handler_funcs_name)
    if func not in handler_funcs.get(key, []):
        handler_adder(func)


def add_handlers(app, handlers, key=None):
    """Add multiple handlers to application.

    :param app: Flask application
    :param handlers: Mapping from handler names to handler functions
    :param key: Blueprint name, passed through to :func:`add_handler`
    """
    # ``items()`` instead of the Python-2-only ``iteritems()`` keeps this
    # helper working on both Python 2 and Python 3.
    for handler_name, func in handlers.items():
        add_handler(app, handler_name, func, key=key)
def redirect(location, code=302):
    """Redirect the client to a desired location. Behaves the same
    as Flask's :func:`flask.redirect` function with an awareness of
    OSF view-only links.

    IMPORTANT: This function should always be used instead of
    flask.redirect to ensure the correct behavior of view-only
    links.
    """
    view_only = request.args.get('view_only', '')
    if not view_only:
        return flask_redirect(location, code=code)
    # Propagate the view_only token onto the target URL so the redirected
    # request retains its view-only access.
    target = furl.furl(location)
    target.args['view_only'] = view_only
    return flask_redirect(target.url, code=code)
| apache-2.0 |
potsmaster/cinder | cinder/tests/unit/keymgr/test_key.py | 18 | 1908 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the key classes.
"""
import array
from cinder.keymgr import key
from cinder import test
class KeyTestCase(test.TestCase):
    """Abstract base case; subclasses must implement ``_create_key``."""
    def _create_key(self):
        # Concrete subclasses build the key object under test here.
        raise NotImplementedError()
    def setUp(self):
        super(KeyTestCase, self).setUp()
        # The key instance exercised by the test methods.
        self.key = self._create_key()
class SymmetricKeyTestCase(KeyTestCase):
    """Tests for key.SymmetricKey using a 256-bit all-zero AES key."""

    def _create_key(self):
        """Build the SymmetricKey under test from the fixture attributes."""
        return key.SymmetricKey(self.algorithm, self.encoded)

    def setUp(self):
        self.algorithm = 'AES'
        # 32 zero bytes as a list of ints.  ``b'\x00' * 32`` replaces the
        # Python-2-only ``('0' * 64).decode('hex')`` and produces the same
        # value on both Python 2 and Python 3.
        self.encoded = array.array('B', b'\x00' * 32).tolist()
        super(SymmetricKeyTestCase, self).setUp()

    def test_get_algorithm(self):
        self.assertEqual(self.algorithm, self.key.get_algorithm())

    def test_get_format(self):
        self.assertEqual('RAW', self.key.get_format())

    def test_get_encoded(self):
        self.assertEqual(self.encoded, self.key.get_encoded())

    def test___eq__(self):
        self.assertTrue(self.key == self.key)
        self.assertFalse(self.key is None)
        # Both operand orders must be handled by __eq__.
        self.assertFalse(None == self.key)

    def test___ne__(self):
        self.assertFalse(self.key != self.key)
        self.assertTrue(self.key is not None)
        self.assertTrue(None != self.key)
| apache-2.0 |
arpan-chavda/rh_app | libs/venus/planet/vendor/html5lib/sanitizer.py | 3 | 14430 | import re
from xml.sax.saxutils import escape, unescape
from tokenizer import HTMLTokenizer
from constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd',
'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol',
'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre',
'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound',
'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt',
'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs' ]
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
# attributes are parsed, and a restricted set, # specified by
# ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
# attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
# in ALLOWED_PROTOCOLS are allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
# => <script> do_nasty_stuff() </script>
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Sanitize a single tokenizer token in place.

        Allowed elements keep only whitelisted attributes (with URI and CSS
        values scrubbed); disallowed elements are converted to escaped
        character data.  Comment tokens are dropped (returns None).
        """
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in tokenTypes.keys():
            token_type = tokenTypes[token_type]
        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                if token.has_key("data"):
                    # Reversed iteration keeps the FIRST occurrence of a
                    # duplicated attribute when building the dict.
                    attrs = dict([(name,val) for name,val in
                                  token["data"][::-1]
                                  if name in self.allowed_attributes])
                    for attr in self.attr_val_is_uri:
                        if not attrs.has_key(attr):
                            continue
                        # Strip control/space characters before inspecting
                        # the URI scheme, so obfuscated schemes are caught.
                        val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                               unescape(attrs[attr])).lower()
                        #remove replacement characters from unescaped characters
                        val_unescaped = val_unescaped.replace(u"\ufffd", "")
                        if (re.match("^[a-z0-9][-+.a-z0-9]*:",val_unescaped) and
                            (val_unescaped.split(':')[0] not in
                             self.allowed_protocols)):
                            del attrs[attr]
                    # Remove non-local url() references from SVG attributes.
                    for attr in self.svg_attr_val_allows_ref:
                        if attr in attrs:
                            attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                                 ' ',
                                                 unescape(attrs[attr]))
                    if (token["name"] in self.svg_allow_local_href and
                        'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                            attrs['xlink:href'])):
                        del attrs['xlink:href']
                    if attrs.has_key('style'):
                        attrs['style'] = self.sanitize_css(attrs['style'])
                    token["data"] = [[name,val] for name,val in attrs.items()]
                return token
            else:
                # Disallowed element: re-serialize the tag as escaped text.
                if token_type == tokenTypes["EndTag"]:
                    token["data"] = "</%s>" % token["name"]
                elif token["data"]:
                    attrs = ''.join([' %s="%s"' % (k,escape(v)) for k,v in token["data"]])
                    token["data"] = "<%s%s>" % (token["name"],attrs)
                else:
                    token["data"] = "<%s>" % token["name"]
                if token.get("selfClosing"):
                    token["data"]=token["data"][:-1] + "/>"
                if token["type"] in tokenTypes.keys():
                    token["type"] = "Characters"
                else:
                    token["type"] = tokenTypes["Characters"]
                del token["name"]
                return token
        elif token_type == tokenTypes["Comment"]:
            # Comments are dropped entirely (implicitly returns None).
            pass
        else:
            return token
    def sanitize_css(self, style):
        """Return *style* with only whitelisted CSS properties kept.

        If the declaration block fails the conservative character/shape
        "gauntlet" checks, the whole style is rejected (empty string).
        """
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin',
                                                'padding']:
                # Shorthand properties: every keyword must be whitelisted or
                # look like a color/length literal (for-else keeps the prop).
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$",keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that yields only sanitized tokens (via HTMLSanitizerMixin)."""
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False):
        #Change case matching defaults as we only output lowercase html anyway
        #This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName)
    def __iter__(self):
        # Tokens for which sanitize_token returns a falsy value are dropped.
        for token in HTMLTokenizer.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token
| gpl-3.0 |
aeroevan/snakebite | test/commandlineparser_test.py | 2 | 41119 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import unittest2
import os
import pwd
import json
import sys
import traceback
from mock import MagicMock, patch, mock_open
from snakebite.config import HDFSConfig
from snakebite.commandlineparser import Commands, CommandLineParser
from snakebite.namenode import Namenode
from config_test import ConfigTest
class CommandLineParserTest(unittest2.TestCase):
def setUp(self):
self.parser = CommandLineParser()
self.default_dir = os.path.join("/user", pwd.getpwuid(os.getuid())[0])
    def test_general_options(self):
        """Global flags (-D/-j/-n/-p/-V) parse correctly and only pre-command."""
        parser = self.parser
        # defaults when no global flags are given
        output = parser.parse('ls some_folder'.split())
        self.assertFalse(output.debug)
        self.assertFalse(output.human)
        self.assertFalse(output.json)
        self.assertEqual(output.namenode, None)
        self.assertEqual(output.port, None)
        #each option
        output = parser.parse('-D ls some_folder'.split())
        self.assertTrue(output.debug)
        output = parser.parse('--debug ls some_folder'.split())
        self.assertTrue(output.debug)
        output = parser.parse('-j ls some_folder'.split())
        self.assertTrue(output.json)
        output = parser.parse('--json ls some_folder'.split())
        self.assertTrue(output.json)
        output = parser.parse('-n namenode_fqdn ls some_folder'.split()) # what are typical values for namenodes?
        self.assertEqual(output.namenode, "namenode_fqdn")
        output = parser.parse('--namenode namenode_fqdn ls some_folder'.split())
        self.assertEqual(output.namenode, "namenode_fqdn")
        output = parser.parse('-p 1234 ls some_folder'.split())
        self.assertEqual(output.port, 1234)
        output = parser.parse('--port 1234 ls some_folder'.split())
        self.assertEqual(output.port, 1234)
        output = parser.parse('-V 4 ls some_folder'.split())
        self.assertEqual(output.version, 4)
        output = parser.parse('--version 4 ls some_folder'.split())
        self.assertEqual(output.version, 4)
        #all options
        output = parser.parse('-D -j -n namenode_fqdn -p 1234 -V 4 ls some_folder'.split())
        self.assertTrue(output.debug)
        self.assertTrue(output.json)
        self.assertEqual(output.namenode, "namenode_fqdn")
        self.assertEqual(output.port, 1234)
        self.assertEqual(output.version, 4)
        #options in illegal position
        with self.assertRaises(SystemExit):
            parser.parse('ls -D some_folder'.split())
        with self.assertRaises(SystemExit):
            parser.parse('ls some_folder -D'.split())
    def test_ls(self):
        """``ls`` defaults to the user's home dir and accepts -d/-R/-s/-h."""
        parser = self.parser
        #no dir
        output = parser.parse('ls'.split())
        self.assertEqual(output.command, 'ls')
        self.assertEqual(output.dir, [self.default_dir])
        #one dir
        output = parser.parse('ls some_dir'.split())
        self.assertEqual(output.dir, ['some_dir'])
        #multiple dirs
        output = parser.parse('ls dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        #specific commands
        output = parser.parse('ls -d -R -s -h some_dir'.split())
        self.assertTrue(output.directory)
        self.assertTrue(output.recurse)
        self.assertTrue(output.summary)
        self.assertTrue(output.human)
        self.assertEqual(output.dir, ['some_dir'])
        #multiple slashes
        output = parser.parse('ls ///dir1 //dir2 /dir3'.split())
        self.assertEqual(output.dir, ['///dir1', '//dir2', '/dir3'])
    def test_mkdir(self):
        """``mkdir`` requires at least one directory argument."""
        parser = self.parser
        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('mkdir'.split())
        #one dir
        output = parser.parse('mkdir some_dir'.split())
        self.assertEqual(output.command, 'mkdir')
        self.assertEqual(output.dir, ['some_dir'])
        #multiple dirs
        output = parser.parse('mkdir dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
    def test_mkdirp(self):
        """``mkdirp`` requires at least one directory argument."""
        parser = self.parser
        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('mkdirp'.split())
        #one dir
        output = parser.parse('mkdirp some_dir'.split())
        self.assertEqual(output.command, 'mkdirp')
        self.assertEqual(output.dir, ['some_dir'])
        #multiple dirs
        output = parser.parse('mkdirp dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
    def test_chown(self):
        """``chown`` needs an owner plus one or more targets; -R recurses."""
        parser = self.parser
        #no dir and/or no owner
        with self.assertRaises(SystemExit):
            parser.parse('chown'.split())
        with self.assertRaises(SystemExit):
            parser.parse('chown owner_or_dir'.split())
        #one dir
        output = parser.parse('chown root some_dir'.split())
        self.assertEqual(output.command, 'chown')
        self.assertEqual(output.dir, ['some_dir'])
        self.assertEqual(output.single_arg, 'root')
        #multiple dirs
        output = parser.parse('chown root dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        self.assertEqual(output.single_arg, 'root')
        #recursive
        output = parser.parse('chown -R root some_dir'.split())
        self.assertTrue(output.recurse)
    def test_chmod(self):
        """``chmod`` needs an integer mode plus targets; -R recurses."""
        parser = self.parser
        #no dir and/or no mode
        with self.assertRaises(SystemExit):
            parser.parse('chmod'.split())
        with self.assertRaises(SystemExit):
            parser.parse('chmod mode_or_dir'.split())
        #one dir
        output = parser.parse('chmod 664 some_dir'.split())
        self.assertEqual(output.command, 'chmod')
        self.assertEqual(output.dir, ['some_dir'])
        self.assertEqual(output.single_int_arg, 664)
        #wrong type for mode argument
        with self.assertRaises(SystemExit):
            parser.parse('chmod not_an_int some_dir'.split())
        #multiple dirs
        output = parser.parse('chmod 664 dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        self.assertEqual(output.single_int_arg, 664)
        #recursive
        output = parser.parse('chmod -R 664 some_dir'.split())
        self.assertTrue(output.recurse)
    def test_chgrp(self):
        """``chgrp`` needs a group plus one or more targets; -R recurses."""
        parser = self.parser
        #no dir and/or no group
        with self.assertRaises(SystemExit):
            parser.parse('chgrp'.split())
        with self.assertRaises(SystemExit):
            parser.parse('chgrp group_or_dir'.split())
        #one dir
        output = parser.parse('chgrp group some_dir'.split())
        self.assertEqual(output.command, 'chgrp')
        self.assertEqual(output.dir, ['some_dir'])
        self.assertEqual(output.single_arg, 'group')
        #multiple dirs
        output = parser.parse('chgrp group dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        self.assertEqual(output.single_arg, 'group')
        #recursive
        output = parser.parse('chgrp -R group some_dir'.split())
        self.assertTrue(output.recurse)
    def test_count(self):
        """``count`` defaults to the home dir and supports -h output."""
        parser = self.parser
        #no dir
        output = parser.parse('count'.split())
        self.assertEqual(output.command, 'count')
        self.assertEqual(output.dir, [self.default_dir])
        #one dir
        output = parser.parse('count some_dir'.split())
        self.assertEqual(output.dir, ['some_dir'])
        #multiple dirs
        output = parser.parse('count dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        # Human output
        output = parser.parse('count -h dir1 dir2 dir3'.split())
        self.assertTrue(output.human)
def test_df(self):
parser = self.parser
#no dir
output = parser.parse('df'.split())
self.assertEqual(output.command, 'df')
# Human output
output = parser.parse('df -h'.split())
self.assertEqual(output.command, 'df')
self.assertTrue(output.human)
with self.assertRaises(SystemExit):
parser.parse('df some_additional_argument'.split())
def test_du(self):
parser = self.parser
#no dir
output = parser.parse('du'.split())
self.assertEqual(output.command, 'du')
self.assertEqual(output.dir, [self.default_dir])
#one dir
output = parser.parse('du some_dir'.split())
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('du dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#summary
output = parser.parse('du -s some_dir'.split())
self.assertTrue(output.summary)
#human
output = parser.parse('du -h some_dir'.split())
self.assertTrue(output.human)
def test_mv(self):
parser = self.parser
#no source and/or no destination
with self.assertRaises(SystemExit):
parser.parse('mv'.split())
with self.assertRaises(SystemExit):
parser.parse('mv src_or_dest'.split())
#one source
output = parser.parse('mv source some_dest'.split())
self.assertEqual(output.command, 'mv')
self.assertEqual(output.dir, ['source'])
self.assertEqual(output.single_arg, 'some_dest')
#multiple sources
output = parser.parse('mv source1 source2 source3 some_dest'.split())
self.assertEqual(output.dir, ['source1', 'source2', 'source3'])
self.assertEqual(output.single_arg, 'some_dest')
def test_rm(self):
parser = self.parser
#no dir and/or no group
with self.assertRaises(SystemExit):
parser.parse('rm'.split())
#one dir
output = parser.parse('rm some_dir'.split())
self.assertEqual(output.command, 'rm')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('rm dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#recursive
output = parser.parse('rm -R some_dir'.split())
self.assertTrue(output.recurse)
#skiptrash
output = parser.parse('rm -S some_dir'.split())
self.assertTrue(output.skiptrash)
#skiptrash
output = parser.parse('rm --skiptrash some_dir'.split())
self.assertTrue(output.skiptrash)
#usetrash
output = parser.parse('rm -T some_dir'.split())
self.assertTrue(output.usetrash)
#usetrash
output =parser.parse('rm --usetrash some_dir'.split())
self.assertTrue(output.usetrash)
#usetrash & skiptrash
output = parser.parse('rm --usetrash --skiptrash some_dir'.split())
self.assertTrue(output.usetrash)
self.assertTrue(output.skiptrash)
def test_touchz(self):
parser = self.parser
#no dir and/or no group
with self.assertRaises(SystemExit):
parser.parse('touchz'.split())
#one dir
output = parser.parse('touchz some_dir'.split())
self.assertEqual(output.command, 'touchz')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('touchz dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_serverdefaults(self):
parser = self.parser
#no arg
output = parser.parse('serverdefaults'.split())
self.assertEqual(output.command, 'serverdefaults')
#too many args
with self.assertRaises(SystemExit):
parser.parse('serverdefaults some_additional_argument'.split())
def test_rmdir(self):
parser = self.parser
#no dir and/or no group
with self.assertRaises(SystemExit):
parser.parse('rmdir'.split())
#one dir
output = parser.parse('rmdir some_dir'.split())
self.assertEqual(output.command, 'rmdir')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('rmdir dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_setrep(self):
parser = self.parser
#no dir and/or no replication factor
with self.assertRaises(SystemExit):
parser.parse('setrep'.split())
with self.assertRaises(SystemExit):
parser.parse('setrep some_dir'.split())
with self.assertRaises(SystemExit):
parser.parse('setrep 3'.split())
#one dir
output = parser.parse('setrep 3 some_dir'.split())
self.assertEqual(output.command, 'setrep')
self.assertEqual(output.dir, ['some_dir'])
self.assertEqual(output.single_int_arg, 3)
#wrong type for mode argument
with self.assertRaises(SystemExit):
parser.parse('setrep not_an_int some_dir'.split())
#multiple dirs
output = parser.parse('setrep 3 dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
self.assertEqual(output.single_int_arg, 3)
#recursive
output = parser.parse('setrep -R 3 some_dir'.split())
self.assertTrue(output.recurse)
def test_usage(self):
parser = self.parser
#no command
output = parser.parse('usage'.split())
self.assertEqual(output.command, 'usage')
#one dir
output = parser.parse('usage some_cmd'.split())
self.assertEqual(output.command, 'usage')
self.assertEqual(output.arg, ['some_cmd'])
#multiple dirs
output = parser.parse('usage cmd1 cmd2 cmd3'.split())
self.assertEqual(output.arg, ['cmd1', 'cmd2', 'cmd3'])
def test_stat(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('stat'.split())
#one dir
output = parser.parse('stat some_dir'.split())
self.assertEqual(output.command, 'stat')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('stat dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_test(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('test'.split())
#one dir
output = parser.parse('test some_dir'.split())
self.assertEqual(output.command, 'test')
self.assertEqual(output.single_arg, 'some_dir')
#multiple dirs
with self.assertRaises(SystemExit):
parser.parse('test dir1 dir2 dir3'.split())
#specific commands
output = parser.parse('test -d -z -e some_dir'.split())
self.assertTrue(output.directory)
self.assertTrue(output.zero)
self.assertTrue(output.exists)
self.assertEqual(output.single_arg, 'some_dir')
def test_cat(self):
parser = self.parser
#no path
with self.assertRaises(SystemExit):
parser.parse('cat'.split())
#one path
output = parser.parse('cat some_file'.split())
self.assertEqual(output.command, 'cat')
self.assertEqual(output.dir, ['some_file'])
#multiple paths
output = parser.parse('cat dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#specific commands
output = parser.parse('cat -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
def test_copyFromLocal(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('copyFromLocal'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('copyFromLocal some_dir'.split())
#two dirs
output = parser.parse('copyFromLocal dir1 dir2'.split())
self.assertEqual(output.dir, ['dir1'])
self.assertEqual(output.single_arg, 'dir2')
def test_copyToLocal(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('copyToLocal'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('copyToLocal some_dir'.split())
#two dirs
output = parser.parse('copyToLocal dir1 dir2'.split())
self.assertEqual(output.dir, ['dir1'])
self.assertEqual(output.single_arg, 'dir2')
self.assertEqual(output.checkcrc, False)
#specific commands
output = parser.parse('copyToLocal -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
def test_cp(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('cp'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('cp some_dir'.split())
#multiple dirs
output = parser.parse('cp dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2'])
self.assertEqual(output.single_arg, 'dir3')
def test_get(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('get'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('get some_dir'.split())
#multiple dirs
output = parser.parse('get dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2'])
self.assertEqual(output.single_arg, 'dir3')
#specific commands
output = parser.parse('get -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
def test_getmerge(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('getmerge'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('getmerge some_dir'.split())
#two dirs
output = parser.parse('getmerge dir1 dir2'.split())
self.assertEqual(output.src_dst[0], 'dir1')
self.assertEqual(output.src_dst[1], 'dir2')
#multiple dirs
with self.assertRaises(SystemExit):
parser.parse('getmerge dir1 dir2 dir3'.split())
# def test_put(self):
# parser = self.parser
# #no dir
# with self.assertRaises(SystemExit):
# parser.parse('put'.split())
# #one dir
# with self.assertRaises(SystemExit):
# parser.parse('put some_dir'.split())
# #multiple dirs
# output = parser.parse('put dir1 dir2 dir3'.split())
# self.assertEqual(output.dir, ['dir1', 'dir2'])
# self.assertEqual(output.single_arg, 'dir3')
def test_tail(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('tail'.split())
#one dir
output = parser.parse('tail some_dir'.split())
self.assertEqual(output.single_arg, 'some_dir')
#multiple dirs
with self.assertRaises(SystemExit):
parser.parse('tail dir1 dir2'.split())
#specific commands
output = parser.parse('tail -f some_dir'.split())
self.assertTrue(output.append)
def test_text(self):
parser = self.parser
#no path
with self.assertRaises(SystemExit):
parser.parse('text'.split())
#one path
output = parser.parse('text some_file'.split())
self.assertEqual(output.command, 'text')
self.assertEqual(output.dir, ['some_file'])
#multiple paths
output = parser.parse('text dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#specific commands
output = parser.parse('text -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
class MockParseArgs(object):
    """Lightweight stand-in for the argparse Namespace produced by
    CommandLineParser.parse().

    `dir` is a list of directories/paths; the remaining attributes mirror
    the parser's option destinations. `x in instance` reports whether the
    instance has an attribute named x (used by the parser's config code).
    """

    def __init__(self, dir=None,
                 single_arg=None,
                 command=None,
                 namenode=None,
                 port=None,
                 usetrash=False,
                 skiptrash=False):
        # Bug fix: the default was a shared mutable list (dir=[]), so every
        # instance created without `dir` aliased the same object. Use None
        # as the sentinel and allocate a fresh list per instance.
        self.dir = [] if dir is None else dir
        self.single_arg = single_arg
        self.command = command
        self.namenode = namenode
        self.port = port
        self.usetrash = usetrash
        self.skiptrash = skiptrash

    def __contains__(self, b):
        # Membership test against the attribute names set on this instance.
        return b in self.__dict__
class CommandLineParserInternalConfigTest(unittest2.TestCase):
    """Tests for CommandLineParser namenode/trash configuration resolution.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual`` throughout.
    """

    def setUp(self):
        self.parser = CommandLineParser()
        # Default HDFS home directory of the current OS user.
        self.default_dir = os.path.join("/user", pwd.getpwuid(os.getuid())[0])

    def assert_namenode_spec(self, host, port, version=None):
        """Assert the parsed args carry the expected namenode host/port."""
        self.assertEqual(self.parser.args.namenode, host)
        self.assertEqual(self.parser.args.port, port)
        if version:
            self.assertEqual(self.parser.args.version, version)

    def assert_namenodes_spec(self, host, port, version=None):
        """Assert at least one configured namenode matches host/port(/version)."""
        for namenode in self.parser.namenodes:
            try:
                self.assertEqual(namenode.host, host)
                self.assertEqual(namenode.port, port)
                if version:
                    self.assertEqual(namenode.version, version)
            except AssertionError:
                continue
            # There was no AssertionError -> we found our NN
            return
        self.fail("NN not found in namenodes")

    def test_cl_config_conflicted(self):
        """Conflicting namenode specs across CL paths must abort."""
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar2:50070/user/rav"])
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50071/user/rav",
                                              "hdfs://foobar:50070/user/rav"])
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50072/user/rav",
                                              "hdfs://foobar2:50070/user/rav"])
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav"],
                                         single_arg="hdfs://foobar2:50070/user/rav",
                                         command="mv")
        with self.assertRaises(SystemExit):
            self.parser.read_config()

    def test_cl_config_simple(self):
        """Consistent hdfs:// URIs on the CL configure the namenode."""
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav2"])
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav2"],
                                         single_arg="hdfs://foobar:50070/user/rav",
                                         command="mv")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)

    def test_cl_config_slash_madness_check_scheme(self):
        """Repeated slashes in paths must not confuse namenode detection."""
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070///user//rav",
                                              "hdfs://foobar:50070/user/////rav2"])
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user////rav",
                                              "hdfs://foobar:50070////user/rav2"],
                                         single_arg="hdfs://foobar:50070/////user/rav",
                                         command="mv")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)

    def test_cl_config_slash_madness_full_check(self):
        """init() strips the scheme/host but preserves the path slashes."""
        self.parser.args = MockParseArgs(dir=["hdfs://foobar/user////rav",
                                              "hdfs://foobar////user/rav2"],
                                         single_arg="hdfs://foobar/////user/rav",
                                         command="mv")
        self.parser.init()
        self.assert_namenode_spec("foobar", Namenode.DEFAULT_PORT)
        self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT)
        self.assertIn("/user////rav", self.parser.args.dir)
        self.assertIn("////user/rav2", self.parser.args.dir)
        self.assertEqual(self.parser.args.single_arg, "/////user/rav")

    def test_cl_config_reduce_paths(self):
        """init() reduces full hdfs:// URIs to plain HDFS paths."""
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav2"],
                                         single_arg="hdfs://foobar:50070/user/rav3",
                                         command="mv")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertIn("/user/rav", self.parser.args.dir)
        self.assertIn("/user/rav2", self.parser.args.dir)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    def test_cl_config_test_single_arg_hdfs_paths(self):
        """single_arg URI is reduced for the 'test' command."""
        self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
                                         command="test")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    def test_cl_config_tail_single_arg_hdfs_paths(self):
        """single_arg URI is reduced for the 'tail' command."""
        self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
                                         command="tail")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    def test_cl_config_mv_single_arg_hdfs_paths(self):
        """single_arg URI is reduced for the 'mv' command."""
        self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
                                         command="mv")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    # Imported at class level so patch.object below can reference the module.
    import snakebite.config

    @patch.object(snakebite.config.HDFSConfig, 'get_external_config')
    @patch("snakebite.commandlineparser.CommandLineParser._read_config_snakebiterc", return_value=None)
    def test_config_no_config(self, config_mock, read_config_mock):
        """With no snakebiterc, no external config and no CL spec: abort."""
        hadoop_home = None
        config_mock.return_value = []
        # Temporarily hide HADOOP_HOME so external config discovery fails.
        if os.environ.get("HADOOP_HOME"):
            hadoop_home = os.environ["HADOOP_HOME"]
            del os.environ["HADOOP_HOME"]
        self.parser.args = MockParseArgs()
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        if hadoop_home:
            os.environ["HADOOP_HOME"] = hadoop_home
        self.assert_namenode_spec(None, None)

    # v1 snakebiterc fixtures.
    valid_snake_one_rc = {"namenode": "foobar", "version": 9, "port": 54310}
    valid_snake_ha_rc = [{"namenode": "foobar", "version": 9, "port": 54310},
                         {"namenode": "foobar2", "version": 9, "port": 54310}]
    invalid_snake_rc = "hdfs://foobar:54310"

    @patch("os.path.exists")
    def test_read_config_snakebiterc_one_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", 54310, 9)
            self.assertEqual(self.parser.args.usetrash, HDFSConfig.use_trash)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_ha_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_ha_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", 54310, 9)
            self.assert_namenodes_spec("foobar2", 54310, 9)
            self.assertEqual(self.parser.args.usetrash, HDFSConfig.use_trash)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_invalid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.invalid_snake_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            with self.assertRaises(SystemExit):
                self.parser.read_config()

    # v1 fixtures without a port: the default port must be filled in.
    valid_snake_noport_one_rc = {"namenode": "foobar", "version": 11}
    valid_snake_noport_ha_rc = [{"namenode": "foobar", "version": 100},
                                {"namenode": "foobar2", "version": 100}]

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_one_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 11)
            self.assertEqual(self.parser.args.usetrash, HDFSConfig.use_trash)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_ha_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_ha_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 100)
            self.assert_namenodes_spec("foobar2", Namenode.DEFAULT_PORT, 100)
            self.assertEqual(self.parser.args.usetrash, HDFSConfig.use_trash)

    # v1 fixtures without port or version: both defaults apply.
    valid_snake_noport_nov_one_rc = {"namenode": "foobar"}
    valid_snake_noport_nov_ha_rc = [{"namenode": "foobar"},
                                    {"namenode": "foobar2"}]

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_nov_one_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_nov_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
            self.assertEqual(self.parser.args.usetrash, HDFSConfig.use_trash)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_nov_ha_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_nov_ha_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
            self.assert_namenodes_spec("foobar2", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
            self.assertEqual(self.parser.args.usetrash, HDFSConfig.use_trash)

    # Mixed v1 fixture: one entry lacks a port, the other a version.
    valid_snake_noport_mix_rc = [{"namenode": "foobar", "version": 100},
                                 {"namenode": "foobar2", "port": 66}]

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_mix_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_mix_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 100)
            self.assert_namenodes_spec("foobar2", 66, Namenode.DEFAULT_VERSION)
            self.assertEqual(self.parser.args.usetrash, HDFSConfig.use_trash)

    # v2 snakebiterc fixtures (config_version 2 with explicit use_trash).
    valid_snake_one_rc_v2 = {
        "config_version": 2,
        "use_trash": False,
        "namenodes": [
            {"host": "foobar3", "version": 9, "port": 54310}
        ]
    }
    valid_snake_ha_rc_v2 = {
        "config_version": 2,
        "use_trash": True,
        "namenodes": [
            {"host": "foobar4", "version": 9, "port": 54310},
            {"host": "foobar5", "version": 9, "port": 54310}
        ]
    }
    invalid_snake_rc_v2 = "hdfs://foobar:54310"

    @patch("os.path.exists")
    def test_read_config_snakebiterc_one_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_one_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertFalse(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar3", 54310, 9)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_ha_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_ha_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertTrue(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar4", 54310, 9)
            self.assert_namenodes_spec("foobar5", 54310, 9)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_invalid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.invalid_snake_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            with self.assertRaises(SystemExit):
                self.parser.read_config()

    # v2 fixtures with missing port/version fields.
    valid_snake_noport_one_rc_v2 = {
        "config_version": 2,
        "use_trash": False,
        "namenodes": [
            {"host": "foobar3", "version": 9}
        ]
    }
    valid_snake_mix_ha_rc_v2 = {
        "config_version": 2,
        "use_trash": True,
        "namenodes": [
            {"host": "foobar4", "version": 100},
            {"host": "foobar5", "port": 54310}
        ]
    }

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_one_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_one_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertFalse(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar3", Namenode.DEFAULT_PORT, 9)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_mix_ha_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_mix_ha_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertTrue(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar4", Namenode.DEFAULT_PORT, 100)
            self.assert_namenodes_spec("foobar5", 54310, Namenode.DEFAULT_VERSION)

    def test_cl_default_port(self):
        """A portless hdfs:// URI falls back to Namenode.DEFAULT_PORT."""
        self.parser.args = MockParseArgs(dir=["hdfs://foobar/user/rav"],
                                         single_arg="hdfs://foobar/user/rav",
                                         command="mv")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", Namenode.DEFAULT_PORT)

    def test_cl_trash_setting_preserved_after_cl_config(self):
        """CL --skiptrash survives namenode resolution from CL paths."""
        # no snakebiterc
        # read config from CL
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav"],
                                         skiptrash=True,
                                         command="rm")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)
        self.assertEqual(self.parser.args.skiptrash, True)

    def _revert_hdfs_try_paths(self):
        # Make sure HDFSConfig is in vanilla state
        HDFSConfig.use_trash = False
        HDFSConfig.hdfs_try_paths = ConfigTest.original_hdfs_try_path
        HDFSConfig.core_try_paths = ConfigTest.original_core_try_path

    @patch("os.path.exists")
    def test_cl_trash_setting_preserved_after_snakebiterc_one_valid(self, exists_mock):
        """CL --usetrash survives reading a snakebiterc."""
        m = mock_open(read_data=json.dumps(self.valid_snake_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs(usetrash=True)
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", 54310, 9)
            self.assertTrue(self.parser.args.usetrash)

    @patch('os.environ.get')
    def test_cl_usetrash_setting_preserved_after_external_nontrash_config(self, environ_get):
        """CL --usetrash overrides a trash-disabled external config."""
        environ_get.return_value = False
        # no snakebiterc
        # read external config (hdfs-site, core-site)
        self.parser.args = MockParseArgs(dir=["/user/rav/test"],
                                         usetrash=True,
                                         command="rm")
        try:
            HDFSConfig.core_try_paths = (ConfigTest.get_config_path('ha-core-site.xml'),)
            HDFSConfig.hdfs_try_paths = (ConfigTest.get_config_path('ha-noport-hdfs-site.xml'),)
            self.parser.init()
            self.assertTrue(self.parser.args.usetrash)
            self.assertTrue(self.parser.client.use_trash)
        finally:
            self._revert_hdfs_try_paths()

    @patch('os.environ.get')
    def test_cl_skiptrash_setting_preserved_after_external_nontrash_config(self, environ_get):
        """--skiptrash wins over --usetrash when both are supplied."""
        environ_get.return_value = False
        # no snakebiterc
        # read external config (hdfs-site, core-site)
        self.parser.args = MockParseArgs(dir=["/user/rav/test"],
                                         skiptrash=True,
                                         usetrash=True,
                                         command="rm")
        try:
            HDFSConfig.core_try_paths = (ConfigTest.get_config_path('ha-core-site.xml'),)
            HDFSConfig.hdfs_try_paths = (ConfigTest.get_config_path('ha-noport-hdfs-site.xml'),)
            self.parser.init()
            self.assertTrue(self.parser.args.skiptrash)
            self.assertTrue(self.parser.args.usetrash)
            self.assertFalse(self.parser.client.use_trash)
        finally:
            self._revert_hdfs_try_paths()
class CommandLineParserExecuteTest(unittest2.TestCase):
    """Regression test: execute() must re-raise with the original traceback."""

    def test_execute_does_not_swallow_tracebacks(self):
        # Replace the registered commands with just our exploding one.
        with patch.dict(Commands.methods, clear=True):
            # Register 'boom' via the command decorator (py2 im_func access).
            @CommandLineParser.command.im_func()
            def boom(*args, **kwargs):
                def subboom():
                    raise IndexError("Boom!")
                subboom()
            parser = CommandLineParser()
            parser.parse(["boom"])
            try:
                parser.execute()
            except IndexError:
                _, _, exc_traceback = sys.exc_info()
                # The inner frame must still be present in the traceback,
                # proving execute() re-raised rather than raising anew.
                self.assertIn(
                    "subboom()\n",
                    traceback.format_exc(),
                    msg="Lost some stack frames when re-raising!",
                )
            else:
                self.fail("execute() should have raised an IndexError!")
| apache-2.0 |
texttochange/vusion-backend | vusion/persist/schedule/tests/test_schedule.py | 1 | 1483 | """Tests for vusion.persist.schedule."""
from datetime import timedelta, datetime
from twisted.trial.unittest import TestCase
from vusion.persist import schedule_generator, DialogueSchedule
from tests.utils import ObjectMaker
from vusion.utils import time_from_vusion_format, time_to_vusion_format
class TestSchedule(TestCase, ObjectMaker):
    """Unit tests for vusion.persist schedule objects.

    Fix: a stray ' | bsd-3-clause' token (data-extraction artifact) fused
    onto the final assertion line broke the file's syntax; it is removed.
    """

    def test_instanciate(self):
        """'date-time' may be given as a vusion-format string or a datetime;
        it is stored as the vusion-format string either way."""
        sometime = time_from_vusion_format('2014-10-02T10:00:00')
        schedule = DialogueSchedule(
            **self.mkobj_schedule(date_time=time_to_vusion_format(sometime)))
        self.assertEqual('2014-10-02T10:00:00', schedule['date-time'])
        schedule = DialogueSchedule(**self.mkobj_schedule(date_time=sometime))
        self.assertEqual('2014-10-02T10:00:00', schedule['date-time'])

    def test_is_expired(self):
        """A schedule counts as expired once its date-time lies more than
        an hour in the past; current and future schedules are not expired."""
        now = datetime.now()
        schedule = schedule_generator(**self.mkobj_schedule(
            date_time=time_to_vusion_format(now)))
        self.assertFalse(schedule.is_expired(now))
        # 61 minutes in the past -> beyond the one-hour grace period.
        past = now - timedelta(minutes=61)
        schedule = schedule_generator(**self.mkobj_schedule(
            date_time=time_to_vusion_format(past)))
        self.assertTrue(schedule.is_expired(now))
        # Future schedules are never expired.
        future = now + timedelta(minutes=15)
        schedule = schedule_generator(**self.mkobj_schedule(
            date_time=time_to_vusion_format(future)))
        self.assertFalse(schedule.is_expired(now))
hanlind/nova | nova/tests/unit/api/openstack/compute/test_image_metadata.py | 8 | 17028 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import image_metadata as image_metadata_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import image_fixtures
# Canned image records shared by every test in this module.
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
# Dotted path of the quota-check helper; patched in the create/update tests.
CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
def get_image_123():
    """Return a fresh deep copy of the first image fixture (image id 123).

    A new copy per call keeps tests from mutating shared fixture state.
    """
    # Copy only the element we return instead of deep-copying the whole
    # fixture list and discarding all but the first entry.
    return copy.deepcopy(IMAGE_FIXTURES[0])
class ImageMetaDataTestV21(test.NoDBTestCase):
controller_class = image_metadata_v21.ImageMetadataController
invalid_request = exception.ValidationError
    def setUp(self):
        """Create a fresh metadata controller for each test."""
        super(ImageMetaDataTestV21, self).setUp()
        self.controller = self.controller_class()
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_index(self, get_all_mocked):
        """GET .../metadata returns all metadata of the image."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        res_dict = self.controller.index(req, '123')
        expected = {'metadata': {'key1': 'value1'}}
        self.assertEqual(res_dict, expected)
        get_all_mocked.assert_called_once_with(mock.ANY, '123')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_show(self, get_mocked):
        """GET .../metadata/key1 returns just that one key/value pair."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        res_dict = self.controller.show(req, '123', 'key1')
        self.assertIn('meta', res_dict)
        self.assertEqual(len(res_dict['meta']), 1)
        self.assertEqual('value1', res_dict['meta']['key1'])
        get_mocked.assert_called_once_with(mock.ANY, '123')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_show_not_found(self, _get_mocked):
        """Requesting a metadata key the image lacks yields 404."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, '123', 'key9')
    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_show_image_not_found(self, _get_mocked):
        """Requesting metadata of a nonexistent image yields 404."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, '100', 'key9')
    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_create(self, get_mocked, update_mocked, quota_mocked):
        """POST merges new keys into the image's existing properties."""
        mock_result = copy.deepcopy(get_image_123())
        mock_result['properties']['key7'] = 'value7'
        update_mocked.return_value = mock_result
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'POST'
        body = {"metadata": {"key7": "value7"}}
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, '123', body=body)
        get_mocked.assert_called_once_with(mock.ANY, '123')
        # The image is updated with the union of old and new metadata.
        expected = copy.deepcopy(get_image_123())
        expected['properties'] = {
            'key1': 'value1',  # existing meta
            'key7': 'value7'  # new meta
        }
        quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
        update_mocked.assert_called_once_with(mock.ANY, '123', expected,
                                              data=None, purge_props=True)
        expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
        self.assertEqual(expected_output, res)
    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_create_image_not_found(self, _get_mocked, update_mocked,
                                    quota_mocked):
        """POST against a nonexistent image: 404 and no quota/update calls."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
        req.method = 'POST'
        body = {"metadata": {"key7": "value7"}}
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.create, req, '100', body=body)
        self.assertFalse(quota_mocked.called)
        self.assertFalse(update_mocked.called)
    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_update_all(self, get_mocked, update_mocked, quota_mocked):
        """PUT on the collection replaces ALL existing metadata."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'PUT'
        body = {"metadata": {"key9": "value9"}}
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.update_all(req, '123', body=body)
        get_mocked.assert_called_once_with(mock.ANY, '123')
        # Previous properties (key1) are dropped, only key9 remains.
        expected = copy.deepcopy(get_image_123())
        expected['properties'] = {
            'key9': 'value9'  # replace meta
        }
        quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
        update_mocked.assert_called_once_with(mock.ANY, '123', expected,
                                              data=None, purge_props=True)
        expected_output = {'metadata': {'key9': 'value9'}}
        self.assertEqual(expected_output, res)
    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
        """PUT on a nonexistent image: 404 and no quota check performed."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
        req.method = 'PUT'
        body = {"metadata": {"key9": "value9"}}
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update_all, req, '100', body=body)
        self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body=body)
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'zz' # changed meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1',
body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_bad_body(self, get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = b''
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPBadRequest())
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_too_many_keys(self, get_mocked, update_mocked,
_quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"foo": "bar"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad',
body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_delete_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_create(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, '123', body=body)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_put(self, _get_mocked,
update_mocked, _quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah", "blah1": "blah1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'blah',
body=body)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, '123', 'key1',
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update_all(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'PUT'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id,
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_create(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id,
body=body)
class ImageMetadataControllerV239(test.NoDBTestCase):
    """Image metadata APIs must be unavailable from microversion 2.39 on."""

    def setUp(self):
        super(ImageMetadataControllerV239, self).setUp()
        self.controller = image_metadata_v21.ImageMetadataController()
        self.req = fakes.HTTPRequest.blank('', version='2.39')

    def test_not_found_for_all_image_metadata_api(self):
        # every handler must report that no method exists for this version
        handler_calls = [
            (self.controller.index, (self.req,)),
            (self.controller.show, (self.req, fakes.FAKE_UUID)),
            (self.controller.create, (self.req, fakes.FAKE_UUID,
                                      {'metadata': {}})),
            (self.controller.update, (self.req, fakes.FAKE_UUID, 'id',
                                      {'metadata': {}})),
            (self.controller.update_all, (self.req, fakes.FAKE_UUID,
                                          {'metadata': {}})),
            (self.controller.delete, (self.req, fakes.FAKE_UUID)),
        ]
        for handler, call_args in handler_calls:
            self.assertRaises(exception.VersionNotFoundForAPIMethod,
                              handler, *call_args)
| apache-2.0 |
miltonruelas/cursotecnico | branch/l10n_pe_ple/__init__.py | 8 | 1479 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
#    WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
#    consequences resulting from its eventual inadequacies and bugs.
#    End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
#    Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import sunat
import tables
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Bertrand256/dash-masternode-tool | src/dashd_intf.py | 1 | 69200 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2017-03
from __future__ import annotations
import decimal
import functools
import json
import os
import re
import socket
import ssl
import threading
import time
import datetime
import logging
from PyQt5.QtCore import QThread
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException, EncodeDecimal
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from paramiko import AuthenticationException, PasswordRequiredException, SSHException
from paramiko.ssh_exception import NoValidConnectionsError, BadAuthenticationType
from typing import List, Dict, Union, Callable, Optional
import app_cache
from app_config import AppConfig
from random import randint
from wnd_utils import WndUtils
import socketserver
import select
from psw_cache import SshPassCache
from common import AttrsProtected, CancelException
log = logging.getLogger('dmt.dashd_intf')
try:
import http.client as httplib
except ImportError:
import httplib
# number of seconds for which cached masternode data remains valid; the cache is used only for
# non-critical features
MASTERNODES_CACHE_VALID_SECONDS = 60 * 60 # 60 minutes
PROTX_CACHE_VALID_SECONDS = 60 * 60
class ForwardServer(socketserver.ThreadingTCPServer):
    """Threaded TCP server acting as the local endpoint of the SSH port forward."""
    # quick rebinding of the local forwarding port between sessions
    allow_reuse_address = True
    # handler threads must not keep the process alive after shutdown
    daemon_threads = True
class Handler(socketserver.BaseRequestHandler):
    """Relays bytes between a local TCP client and an SSH 'direct-tcpip' channel.

    Subclasses created at runtime (see SSHTunnelThread.run) supply the class
    attributes: chain_host, chain_port, ssh_transport and broken_conn_callback.
    """

    def handle(self):
        """Open a channel to (chain_host, chain_port) and pump data both ways."""
        try:
            log.debug('Handler, starting ssh_transport.open_channel')
            chan = self.ssh_transport.open_channel(kind='direct-tcpip',
                                                   dest_addr=(self.chain_host, self.chain_port),
                                                   src_addr=self.request.getpeername())
            log.debug('Handler, started ssh_transport.open_channel')
        except Exception as e:
            # a failed channel open usually means the SSH session itself broke
            log.error('open_channel error: ' + str(e))
            if self.broken_conn_callback is not None:
                self.broken_conn_callback()
            return

        if chan is None:
            return

        try:
            while True:
                # block (up to 10 s) until either side has data to relay
                r, w, x = select.select([self.request, chan], [], [], 10)
                if self.request in r:
                    data = self.request.recv(1024)
                    if len(data) == 0:
                        # local client closed the connection
                        break
                    chan.send(data)
                    log.debug(f'SSH tunnel - sent {len(data)} bytes')
                if chan in r:
                    data = chan.recv(1024)
                    if len(data) == 0:
                        # remote end closed the channel
                        break
                    self.request.send(data)
                    log.debug(f'SSH tunnel - received {len(data)} bytes')
            log.debug('Finishing Handler.handle')
        except socket.error as e:
            log.error('Handler socker.error occurred: ' + str(e))
        except Exception as e:
            log.error('Handler exception occurred: ' + str(e))
        finally:
            chan.close()
            self.request.close()
class SSHTunnelThread(QThread):
    """Background thread running a local TCP server that forwards connections
    through an established SSH transport (local port forwarding).
    """

    def __init__(self, local_port, remote_ip, remote_port, transport, ready_event,
                 on_connection_broken_callback=None, on_finish_thread_callback=None):
        """
        :param local_port: local TCP port to listen on (bound to 127.0.0.1)
        :param remote_ip: destination host reached through the SSH transport
        :param remote_port: destination port on the remote side
        :param transport: connected paramiko transport used to open channels
        :param ready_event: threading.Event set as soon as the thread starts
        :param on_connection_broken_callback: invoked when a forwarding channel
            can no longer be opened (connection considered broken)
        :param on_finish_thread_callback: invoked after the forward server stops
        """
        QThread.__init__(self)
        self.local_port = local_port
        self.remote_ip = remote_ip
        self.remote_port = remote_port
        self.transport = transport
        self.ready_event = ready_event
        self.forward_server = None
        self.on_connection_broken_callback = on_connection_broken_callback
        self.on_finish_thread_callback = on_finish_thread_callback
        self.setObjectName('SSHTunnelThread')

    def __del__(self):
        pass

    def stop(self):
        """Shut down the forwarding server, which ends the run() loop."""
        if self.forward_server:
            self.forward_server.shutdown()

    def handler_broken_connection_callback(self):
        # called by Handler when the SSH channel cannot be opened anymore;
        # stop the tunnel and propagate the event to the owner
        try:
            self.stop()

            if self.on_connection_broken_callback is not None:
                self.on_connection_broken_callback()
        except:
            log.exception('Exception while shutting down forward server.')

    def run(self):
        # bind the request handler to this tunnel's parameters via a subclass
        class SubHandler(Handler):
            chain_host = self.remote_ip
            chain_port = self.remote_port
            ssh_transport = self.transport
            broken_conn_callback = self.handler_broken_connection_callback

        try:
            self.ready_event.set()
            log.debug('Started SSHTunnelThread, local port forwarding 127.0.0.1:%s -> %s:%s' %
                      (str(self.local_port), self.remote_ip, str(self.remote_port)))
            self.forward_server = ForwardServer(('127.0.0.1', self.local_port), SubHandler)
            # blocks until stop()/shutdown() is called
            self.forward_server.serve_forever()
            log.debug('Stopped local port forwarding 127.0.0.1:%s -> %s:%s' %
                      (str(self.local_port), self.remote_ip, str(self.remote_port)))
            if self.on_finish_thread_callback:
                self.on_finish_thread_callback()
        except Exception as e:
            log.exception('SSH tunnel exception occurred')
class UnknownError(Exception):
    """Raised when a remote command fails without yielding any stderr output."""
class DashdConnectionError(Exception):
    """Signals an unusable RPC connection, wrapping the original exception.

    The RPC retry logic catches this type to decide whether to switch to
    another configured connection; the initial cause remains available via
    the ``org_exception`` attribute.
    """

    def __init__(self, org_exception):
        # Bug fix: the original code called Exception.__init__(org_exception)
        # without `self`, which passed the wrapped exception as `self` (mutating
        # its args) and left this instance uninitialized.
        super().__init__(str(org_exception))
        self.org_exception = org_exception
class DashdSSH(object):
    """SSH session to a remote host running dashd. Provides:
      * execution of remote shell commands (remote_command),
      * local port forwarding to the remote RPC service (open_tunnel),
      * discovery of the remote dash.conf settings (find_dashd_config).
    """

    def __init__(self, host, port, username, on_connection_broken_callback=None, auth_method: str = 'password',
                 private_key_path: str = ''):
        """
        :param host: SSH server host name or address
        :param port: SSH server port
        :param username: SSH user name
        :param on_connection_broken_callback: passed on to the tunnel thread; called
            when the forwarding breaks
        :param auth_method: one of 'any', 'password', 'key_pair', 'ssh_agent'
        :param private_key_path: path to the private key file (used with 'key_pair')
        """
        self.host = host
        self.port = port
        self.username = username
        self.ssh = None
        self.channel = None
        self.fw_channel = None
        self.connected = False
        self.connection_broken = False
        self.ssh_thread = None
        self.auth_method = auth_method  # 'any', 'password', 'key_pair', 'ssh_agent'
        self.private_key_path = private_key_path
        self.on_connection_broken_callback = on_connection_broken_callback

    def __del__(self):
        self.disconnect()

    def remote_command(self, cmd):
        """Execute `cmd` remotely and return its stdout as a list of lines.

        Raises Exception carrying the remote stderr text on a non-zero exit
        code, or UnknownError when no stderr output is available.
        """
        channel = None
        try:
            channel = self.ssh.get_transport().open_session()
            channel.exec_command(cmd)
            ret_code = channel.recv_exit_status()

            if ret_code == 0:
                # poll for up to ~2 seconds until stdout data arrives
                for idx in range(1, 20):
                    if channel.recv_ready():
                        break
                    time.sleep(0.1)
                if not channel.recv_ready():
                    raise Exception('Data not ready')
                data = channel.recv(500)
                return data.decode().split('\n')
            else:
                # same polling scheme for stderr on failure
                for idx in range(1, 20):
                    if channel.recv_stderr_ready():
                        break
                    time.sleep(0.1)
                if channel.recv_stderr_ready():
                    data = channel.recv_stderr(500)
                    error = data.decode()
                    raise Exception(error)
                else:
                    raise UnknownError('Unknown error executing remote command: ' + cmd)
        finally:
            if channel:
                channel.close()

    def connect(self) -> bool:
        """Establish the SSH session using the configured auth method.

        Loops internally, prompting the user (via SshPassCache) for passwords
        or key passphrases until the connection succeeds or an unrecoverable
        error occurs.
        :return: True when connected.
        """
        import paramiko
        if self.ssh is None:
            self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        password = None
        pass_message = None

        while True:
            try:
                if self.auth_method == 'any':
                    self.ssh.connect(self.host, port=int(self.port), username=self.username, password=password)
                elif self.auth_method == 'password':
                    self.ssh.connect(self.host, port=int(self.port), username=self.username, password=password,
                                     look_for_keys=False, allow_agent=False)
                elif self.auth_method == 'key_pair':
                    if not self.private_key_path:
                        raise Exception('No RSA private key path was provided.')
                    self.ssh.connect(self.host, port=int(self.port), username=self.username, password=password,
                                     key_filename=self.private_key_path, look_for_keys=False, allow_agent=False)
                elif self.auth_method == 'ssh_agent':
                    self.ssh.connect(self.host, port=int(self.port), username=self.username, password=password,
                                     look_for_keys=False, allow_agent=True)

                self.connected = True
                if password:
                    # remember the working password for future sessions
                    SshPassCache.save_password(self.username, self.host, password)
                break
            except PasswordRequiredException as e:
                # private key with password protection is used; ask user for password
                pass_message = "Enter passphrase for <b>private key</b> or password for %s" % \
                               (self.username + '@' + self.host)
                while True:
                    password = SshPassCache.get_password(self.username, self.host, message=pass_message)
                    if password:
                        break

            except BadAuthenticationType as e:
                raise Exception(str(e))

            except AuthenticationException as e:
                # This exception will be raised in the following cases:
                #  1. a private key with password protection is used but the user enters incorrect password
                #  2. a private key exists but user's public key is not added to the server's allowed keys
                #  3. normal login to server is performed but the user enters bad password
                # So, in the first case, the second query for password will ask for normal password to server,
                #  not for a private key.

                if self.auth_method == 'key_pair':
                    WndUtils.error_msg(message=f'Authentication failed for private key: {self.private_key_path} '
                                               f'(username {self.username}).')
                    break
                else:
                    if password is not None:
                        WndUtils.error_msg(message='Incorrect password, try again...')

                    while True:
                        password = SshPassCache.get_password(self.username, self.host, message=pass_message)
                        if password:
                            break

            except SSHException as e:
                if e.args and e.args[0] == 'No authentication methods available':
                    # server refused a password-less attempt; prompt and retry
                    while True:
                        password = SshPassCache.get_password(self.username, self.host)
                        if password:
                            break
                else:
                    raise
            except Exception as e:
                log.exception(str(e))
                raise

        return self.connected

    def on_tunnel_thread_finish(self):
        # callback from SSHTunnelThread once its forward server stops
        self.ssh_thread = None

    def open_tunnel(self, local_port, remote_ip, remote_port):
        """Start local port forwarding 127.0.0.1:local_port -> remote_ip:remote_port."""
        if self.connected:
            if self.ssh_thread is not None:
                raise Exception('SSH tunnel already open.')

            ready_event = threading.Event()
            self.ssh_thread = SSHTunnelThread(local_port, remote_ip, remote_port, self.ssh.get_transport(), ready_event,
                                              on_connection_broken_callback=self.on_connection_broken_callback,
                                              on_finish_thread_callback=self.on_tunnel_thread_finish)
            self.ssh_thread.start()
            ready_event.wait(10)
            # wait a moment for the tunnel to come-up
            time.sleep(0.1)
            log.debug('Started local port forwarding 127.0.0.1:%s -> %s:%s' %
                      (str(local_port), remote_ip, str(remote_port)))
        else:
            raise Exception('SSH not connected')

    def find_dashd_config(self):
        """
        Try to read configuration of remote dash daemon. In particular we need parameters concerning rpc
        configuration.
        :return: tuple (dashd_running, dashd_config_found, dashd config file contents as dict)
          or error string in error occurred.
        """
        dashd_running = False
        dashd_config_found = False
        if not self.ssh:
            raise Exception('SSH session not ready')
        try:
            # find dashd process id if running
            try:
                pids = self.remote_command('ps -C "dashd" -o pid')
            except UnknownError:
                raise Exception('is dashd running on the remote machine?')
            pid = None
            if isinstance(pids, list):
                pids = [pid.strip() for pid in pids]
            if len(pids) >= 2 and pids[0] == 'PID' and re.match('\d+', pids[1]):
                pid = pids[1]
            elif len(pids) >= 1 and re.match('\d+', pids[0]):
                # NOTE(review): this branch validates pids[0] but reads pids[1]; with a
                # single-element list this raises IndexError — looks like it should be
                # pids[0]. Verify before changing.
                pid = pids[1]
            config = {}
            if pid:
                dashd_running = True
                # using dashd pid find its executable path and then .dashcore directory and finally dash.conf file
                executables = self.remote_command('ls -l /proc/' + str(pid) + '/exe')
                if executables and len(executables) >= 1:
                    elems = executables[0].split('->')
                    if len(elems) == 2:
                        executable = elems[1].strip()
                        dashd_dir = os.path.dirname(executable)
                        dash_conf_file = dashd_dir + '/.dashcore/dash.conf'
                        conf_lines = []
                        try:
                            conf_lines = self.remote_command('cat ' + dash_conf_file)
                        except Exception as e:
                            # probably error no such file or directory
                            # try to read dashd's cwd + cmdline
                            cwd_lines = self.remote_command('ls -l /proc/' + str(pid) + '/cwd')
                            if cwd_lines:
                                elems = cwd_lines[0].split('->')
                                if len(elems) >= 2:
                                    cwd = elems[1]
                                    dash_conf_file = cwd + '/.dashcore/dash.conf'
                                    try:
                                        conf_lines = self.remote_command('cat ' + dash_conf_file)
                                    except Exception as e:
                                        # second method did not succeed, so assume that the conf file is
                                        # located in the /home/<username>/.dashcore directory
                                        dash_conf_file = '/home/' + self.username + '/.dashcore/dash.conf'
                                        conf_lines = self.remote_command('cat ' + dash_conf_file)

                        # parse key=value lines of dash.conf into a dict
                        for line in conf_lines:
                            elems = [e.strip() for e in line.split('=')]
                            if len(elems) == 2:
                                config[elems[0]] = elems[1]
                        dashd_config_found = True
            return dashd_running, dashd_config_found, config
        except Exception as e:
            return str(e)

    def disconnect(self):
        """Stop the tunnel thread (if any) and close the SSH session."""
        if self.ssh:
            if self.ssh_thread:
                self.ssh_thread.stop()
            self.ssh.close()
            del self.ssh
            self.ssh = None
            self.connected = False
class DashdIndexException(JSONRPCException):
    """Raised when the remote Dash daemon lacks the address/spent/timestamp/tx
    indexing options required by the failed RPC call; extends the original
    error message with instructions for enabling them.
    """
    def __init__(self, parent_exception):
        JSONRPCException.__init__(self, parent_exception.error)
        # append user-facing remediation instructions to the original message
        self.message = self.message + \
                       '\n\nMake sure the dash daemon you are connecting to has the following options enabled in ' \
                       'its dash.conf:\n\n' + \
                       'addressindex=1\n' + \
                       'spentindex=1\n' + \
                       'timestampindex=1\n' + \
                       'txindex=1\n\n' + \
                       'Changing these parameters requires to execute dashd with "-reindex" option (linux: ./dashd -reindex)'
def control_rpc_call(_func=None, *, encrypt_rpc_arguments=False, allow_switching_conns=True):
    """
    Decorator dedicated to functions related to RPC calls, taking care of switching an active connection if the
    current one becomes faulty. It also performs argument encryption for configured RPC calls.

    :param encrypt_rpc_arguments: when True and the current connection has an RSA public key configured,
        positional arguments are JSON-serialized, RSA-OAEP encrypted in chunks and sent as hex strings
        prefixed with the 'DMTENCRYPTEDV1' marker
    :param allow_switching_conns: when False, a connection failure is re-raised instead of retrying the
        call through the next configured connection
    """
    def control_rpc_call_inner(func):
        @functools.wraps(func)
        def catch_timeout_wrapper(*args, **kwargs):
            ret = None
            last_exception = None
            self = args[0]
            self.mark_call_begin()
            try:
                # serialize all RPC traffic through a single HTTP connection
                self.http_lock.acquire()
                last_conn_reset_time = None
                for try_nr in range(1, 5):
                    try:
                        try:
                            if encrypt_rpc_arguments:
                                if self.cur_conn_def:
                                    pubkey = self.cur_conn_def.get_rpc_encryption_pubkey_object()
                                else:
                                    pubkey = None

                                if pubkey:
                                    # chunk the serialized arguments so each piece fits the
                                    # RSA-OAEP payload limit for the configured key size
                                    args_str = json.dumps(args[1:])
                                    max_chunk_size = int(pubkey.key_size / 8) - 75
                                    encrypted_parts = []
                                    while args_str:
                                        data_chunk = args_str[:max_chunk_size]
                                        args_str = args_str[max_chunk_size:]
                                        ciphertext = pubkey.encrypt(data_chunk.encode('ascii'),
                                                                    padding.OAEP(
                                                                        mgf=padding.MGF1(algorithm=hashes.SHA256()),
                                                                        algorithm=hashes.SHA256(),
                                                                        label=None))
                                        encrypted_parts.append(ciphertext.hex())

                                    args = (args[0], 'DMTENCRYPTEDV1') + tuple(encrypted_parts)
                                    log.info(
                                        'Arguments of the "%s" call have been encrypted with the RSA public key of '
                                        'the RPC node.', func.__name__)

                            ret = func(*args, **kwargs)
                            last_exception = None
                            self.mark_cur_conn_cfg_is_ok()
                            break
                        except (ConnectionResetError, ConnectionAbortedError, httplib.CannotSendRequest,
                                BrokenPipeError) as e:
                            # this exceptions occur usually when the established connection gets disconnected after
                            # some time of inactivity; try to reconnect within the same connection configuration
                            log.warning('Error while calling of "' + str(func) + ' (1)". Details: ' + str(e))
                            if last_conn_reset_time:
                                # a reset was already tried once; give up on this config
                                raise DashdConnectionError(e)  # switch to another config if possible
                            else:
                                last_exception = e
                                last_conn_reset_time = time.time()
                                self.reset_connection()  # retry with the same connection
                        except (socket.gaierror, ConnectionRefusedError, TimeoutError, socket.timeout,
                                NoValidConnectionsError) as e:
                            # exceptions raised most likely by not functioning dashd node; try to switch to another node
                            # if there is any in the config
                            log.warning('Error while calling of "' + str(func) + ' (3)". Details: ' + str(e))
                            raise DashdConnectionError(e)
                        except JSONRPCException as e:
                            log.error('Error while calling of "' + str(func) + ' (2)". Details: ' + str(e))
                            err_message = e.error.get('message','').lower()
                            self.http_conn.close()
                            if e.code == -5 and e.message == 'No information available for address':
                                # the node is missing the required index options
                                raise DashdIndexException(e)
                            elif err_message.find('502 bad gateway') >= 0 or err_message.find('unknown error') >= 0:
                                raise DashdConnectionError(e)
                            else:
                                raise
                    except DashdConnectionError as e:
                        # try another net config if possible
                        log.error('Error while calling of "' + str(func) + '" (4). Details: ' + str(e))
                        if not allow_switching_conns or not self.switch_to_next_config():
                            self.last_error_message = str(e.org_exception)
                            raise e.org_exception  # couldn't use another conn config, raise last exception
                        else:
                            try_nr -= 1  # another config retries do not count
                            # NOTE(review): decrementing a 'for' loop variable has no effect on the
                            # iteration count in Python, so switched-config retries actually do count
                            # toward the 4-attempt limit — verify intent.
                            last_exception = e.org_exception
                    except Exception:
                        raise
            finally:
                self.http_lock.release()

            if last_exception:
                raise last_exception
            return ret
        return catch_timeout_wrapper

    if _func is None:
        # decorator used with arguments: @control_rpc_call(...)
        return control_rpc_call_inner
    else:
        # decorator used bare: @control_rpc_call
        return control_rpc_call_inner(_func)
class MasternodeProtx:
    """ProTx-related attributes of a masternode, filled from the JSON returned by the
    RPC node (copy_from_json) and persisted in the local 'protx' cache table.
    """
    def __init__(self):
        self.marker = False                  # used during list synchronization to detect stale entries
        self.modified = False                # set by __setattr__ when any tracked attribute changes
        self.db_id: Optional[int] = None     # row id in the 'protx' table; None = not stored yet
        self.protx_hash: str = ''
        self.collateral_hash: str = ''
        self.collateral_index: int = -1
        self.collateral_address: str = ''
        self.operator_reward: float = 0.0
        self.service: str = ''               # ip:port of the masternode service
        self.registered_height: int = -1
        self.last_paid_height: int = -1
        self.pose_penalty: int = 0
        self.pose_revived_height: int = -1
        self.pose_ban_height: int = -1
        self.owner_address: str = ''
        self.voting_address: str = ''
        self.payout_address: str = ''
        self.pubkey_operator: str = ''
        self.operator_payout_address: str = ''

    def clear(self):
        """Reset all attributes (except marker/modified) to their defaults."""
        self.db_id = None
        self.protx_hash = ''
        self.collateral_hash = ''
        self.collateral_index = -1
        self.collateral_address = ''
        self.operator_reward = 0.0
        self.service = ''
        self.registered_height = -1
        self.last_paid_height = -1
        self.pose_penalty = 0
        self.pose_revived_height = -1
        self.pose_ban_height = -1
        self.owner_address = ''
        self.voting_address = ''
        self.payout_address = ''
        self.pubkey_operator = ''
        self.operator_payout_address = ''

    def copy_from(self, src: MasternodeProtx):
        """Copy all protx attributes from another instance (modification tracking applies)."""
        self.protx_hash = src.protx_hash
        self.collateral_hash = src.collateral_hash
        self.collateral_index = src.collateral_index
        self.collateral_address = src.collateral_address
        self.operator_reward = src.operator_reward
        self.service = src.service
        self.registered_height = src.registered_height
        self.last_paid_height = src.last_paid_height
        self.pose_penalty = src.pose_penalty
        self.pose_revived_height = src.pose_revived_height
        self.pose_ban_height = src.pose_ban_height
        self.owner_address = src.owner_address
        self.voting_address = src.voting_address
        self.payout_address = src.payout_address
        self.pubkey_operator = src.pubkey_operator
        self.operator_payout_address = src.operator_payout_address

    def copy_from_json(self, protx: Dict):
        """Fill attributes from a single 'protx' RPC result entry ('state' is a nested dict)."""
        self.protx_hash = protx.get('proTxHash')
        self.collateral_hash = protx.get('collateralHash')
        self.collateral_index = protx.get('collateralIndex', 0)
        self.collateral_address = protx.get('collateralAddress')
        self.operator_reward = protx.get('operatorReward')
        s = protx.get('state')
        if s and isinstance(s, dict):
            self.service = s.get('service')
            self.registered_height = s.get('registeredHeight')
            self.last_paid_height = s.get('lastPaidHeight')
            self.pose_penalty = s.get('PoSePenalty')
            self.pose_revived_height = s.get('PoSeRevivedHeight')
            self.pose_ban_height = s.get('PoSeBanHeight')
            self.owner_address = s.get('ownerAddress')
            self.voting_address = s.get('votingAddress')
            self.payout_address = s.get('payoutAddress')
            self.pubkey_operator = s.get('pubKeyOperator')
            self.operator_payout_address = s.get('operatorPayoutAddress')

    def __setattr__(self, name, value):
        # track modifications; Decimal values (from JSON-RPC parsing) are normalized
        # to float so comparisons with previously stored floats behave consistently
        if hasattr(self, name) and name not in ('modified', 'marker', 'db_id', '_AttrsProtected__allow_attr_definition'):
            if isinstance(value, decimal.Decimal):
                value = float(value)
            if getattr(self, name) != value:
                self.modified = True
        super().__setattr__(name, value)

    def update_in_db(self, cursor):
        """Insert or update this row in the 'protx' table using the given DB cursor."""
        try:
            if self.db_id is None:
                cursor.execute(
                    "INSERT INTO protx(protx_hash, collateral_hash, collateral_index, collateral_address,"
                    "operator_reward, service, registered_height, last_paid_height, pose_penalty, "
                    "pose_revived_height, pose_ban_height, owner_address, voting_address, payout_address,"
                    "pubkey_operator, operator_payout_address) "
                    "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
                    (self.protx_hash, self.collateral_hash, self.collateral_index,
                     self.collateral_address, self.operator_reward, self.service,
                     self.registered_height, self.last_paid_height, self.pose_penalty,
                     self.pose_revived_height, self.pose_ban_height, self.owner_address,
                     self.voting_address, self.payout_address, self.pubkey_operator,
                     self.operator_payout_address))
                self.db_id = cursor.lastrowid
            else:
                cursor.execute(
                    "update protx set protx_hash=?, collateral_hash=?, collateral_index=?, collateral_address=?,"
                    "operator_reward=?, service=?, registered_height=?, last_paid_height=?, pose_penalty=?, "
                    "pose_revived_height=?, pose_ban_height=?, owner_address=?, voting_address=?, payout_address=?,"
                    "pubkey_operator=?, operator_payout_address=? where id=?",
                    (self.protx_hash, self.collateral_hash, self.collateral_index,
                     self.collateral_address, self.operator_reward, self.service,
                     self.registered_height, self.last_paid_height, self.pose_penalty,
                     self.pose_revived_height, self.pose_ban_height, self.owner_address,
                     self.voting_address, self.payout_address, self.pubkey_operator,
                     self.operator_payout_address, self.db_id))
        except Exception as e:
            log.exception(str(e))

    def delete_from_db(self, cursor):
        """Delete this row from the 'protx' table if it has been stored."""
        if self.db_id is not None:
            cursor.execute("delete from protx where id=?", (self.db_id,))
class Masternode(AttrsProtected):
    """Masternode attributes as reported by the RPC node's masternode list, combined
    with the related ProTx data and local DB/synchronization bookkeeping fields.
    """
    def __init__(self):
        AttrsProtected.__init__(self)
        self.ident: Optional[str] = None       # collateral-tx-hash + '-' + index
        self.status: Optional[str] = None
        self.payee: Optional[str] = None
        self.lastpaidtime = None
        self.lastpaidblock = None
        self.ip_port = None
        self.protx_hash: Optional[str] = None
        self.db_id = None                      # row id in the local cache database
        self.marker = None                     # used during synchronization to detect stale entries
        self.modified = False                  # True when any attribute changed since load
        self.monitor_changes = False           # when True, __setattr__ tracks modifications
        self.queue_position = None
        self.protx = MasternodeProtx()
        self.set_attr_protection()

    def copy_from(self, src: Masternode):
        """Copy RPC-derived fields from `src`, setting self.modified only when something differs."""
        # Bug fix: the original compared self.status against src.ident (instead of
        # src.status), which made the condition true — and 'modified' set — on
        # virtually every call.
        if self.ident != src.ident or self.status != src.status or self.payee != src.payee or \
           self.lastpaidtime != src.lastpaidtime or self.lastpaidblock != src.lastpaidblock or \
           self.ip_port != src.ip_port or self.protx_hash != src.protx_hash or \
           self.queue_position != src.queue_position:
            self.ident = src.ident
            self.status = src.status
            self.payee = src.payee
            self.lastpaidtime = src.lastpaidtime
            self.lastpaidblock = src.lastpaidblock
            self.ip_port = src.ip_port
            self.protx_hash = src.protx_hash
            self.queue_position = src.queue_position
            self.modified = True

    def copy_from_json(self, mn_ident: str, mn_json: Dict):
        """Fill attributes from one entry of the masternode-list RPC result."""
        if self.ident != mn_ident:
            self.ident = mn_ident
            self.modified = True
        if self.status != mn_json.get('status'):
            self.status = mn_json.get('status')
            self.modified = True
        if self.payee != mn_json.get('payee'):
            self.payee = mn_json.get('payee')
            self.modified = True
        if self.lastpaidtime != mn_json.get('lastpaidtime', 0):
            self.lastpaidtime = mn_json.get('lastpaidtime', 0)
            self.modified = True
        if self.lastpaidblock != mn_json.get('lastpaidblock', 0):
            self.lastpaidblock = mn_json.get('lastpaidblock', 0)
            self.modified = True
        if self.ip_port != mn_json.get('address'):
            self.ip_port = mn_json.get('address')
            self.modified = True
        if self.protx_hash != mn_json.get('proTxHash'):
            self.protx_hash = mn_json.get('proTxHash')
            self.modified = True

    @property
    def registered_height(self):
        """Registration block height taken from the associated ProTx data."""
        if self.protx:
            return self.protx.registered_height

    def __setattr__(self, name, value):
        # when monitor_changes is enabled, flag any real attribute change
        if hasattr(self, name) and name not in ('modified', 'marker', 'monitor_changes',
                                                '_AttrsProtected__allow_attr_definition'):
            if self.monitor_changes and getattr(self, name) != value:
                self.modified = True
        super().__setattr__(name, value)
def json_cache_wrapper(func, intf, cache_file_ident, skip_cache=False,
accept_cache_data_fun: Optional[Callable[[Dict], bool]]=None):
"""
Wrapper for saving/restoring rpc-call results inside cache files.
:param accept_cache_data_fun: reference to an external function verifying whether data read from cache
can be accepted; if not, a normal call to an rpc node will be executed
"""
def json_call_wrapper(*args, **kwargs):
nonlocal skip_cache, cache_file_ident, intf, func
fname = '/insight_dash_'
if intf.app_config.is_testnet:
fname += 'testnet_'
cache_file = intf.app_config.tx_cache_dir + fname + cache_file_ident + '.json'
if not skip_cache:
try: # looking into cache first
with open(cache_file) as fp:
j = json.load(fp, parse_float=decimal.Decimal)
if accept_cache_data_fun is None or accept_cache_data_fun(j):
return j
except:
pass
# if not found in cache, call the original function
j = func(*args, **kwargs)
try:
with open(cache_file, 'w') as fp:
json.dump(j, fp, default=EncodeDecimal)
except Exception as e:
log.exception('Cannot save data to a cache file')
pass
return j
return json_call_wrapper
class DashdInterface(WndUtils):
    """RPC access layer for a Dash daemon: direct HTTP(S) or over an SSH tunnel,
    with masternode/protx data cached in RAM and in the local SQLite db."""

    def __init__(self, window,
                 on_connection_initiated_callback=None,
                 on_connection_failed_callback=None,
                 on_connection_successful_callback=None,
                 on_connection_disconnected_callback=None):
        WndUtils.__init__(self, app_config=None)
        self.initialized = False
        self.app_config = None
        self.db_intf = None
        self.connections = []
        self.cur_conn_index = 0
        self.cur_conn_def: Optional['DashNetworkConnectionCfg'] = None
        # block height -> block timestamp memoization (see get_block_timestamp)
        self.block_timestamps: Dict[int, int] = {}

        # below is the connection with which particular RPC call has started; if connection is switched because of
        # problems with some nodes, switching stops if we close round and return to the starting connection
        self.starting_conn = None

        self.masternodes: List[Masternode] = []
        self.masternodes_by_ident: Dict[str, Masternode] = {}
        self.masternodes_by_ip_port: Dict[str, Masternode] = {}
        self.protx_by_hash: Dict[str, MasternodeProtx] = {}

        self.ssh = None
        self.window = window
        self.active = False
        self.rpc_url = None
        self.proxy = None
        self.http_conn = None  # HTTPConnection object passed to the AuthServiceProxy (for convenient connection reset)
        self.on_connection_initiated_callback = on_connection_initiated_callback
        self.on_connection_failed_callback = on_connection_failed_callback
        self.on_connection_successful_callback = on_connection_successful_callback
        self.on_connection_disconnected_callback = on_connection_disconnected_callback
        self.last_error_message = None
        # txid -> raw transaction json for current mempool entries (see fetch_mempool_txes)
        self.mempool_txes: Dict[str, Dict] = {}
        self.http_lock = threading.RLock()
def initialize(self, config: 'AppConfig', connection=None, for_testing_connections_only=False):
    """Bind the interface to the application configuration and pick the connection to use.

    :param config: application configuration (also provides the db cache interface)
    :param connection: when given, only this connection is used (used for testing
        a specific connection)
    :param for_testing_connections_only: when True, cached data is not loaded from db
    """
    # the original assigned self.app_config = config three times; once is enough
    self.app_config = config
    self.db_intf = self.app_config.db_intf

    # conn configurations are used from the first item in the list; if one fails, then next is taken
    if connection:
        # this parameter is used for testing specific connection
        self.connections = [connection]
    else:
        # get connection list ordered by priority of use
        self.connections = self.app_config.get_ordered_conn_list()

    self.cur_conn_index = 0
    self.cur_conn_def = self.connections[self.cur_conn_index] if self.connections else None

    if not for_testing_connections_only:
        self.load_data_from_db_cache()
    self.initialized = True
def load_data_from_db_cache(self):
    """Load cached masternode and protx rows from the SQLite db into the RAM maps,
    fixing duplicated masternode rows left behind by an interrupted load."""
    self.masternodes.clear()
    self.protx_by_hash.clear()
    self.masternodes_by_ident.clear()
    self.masternodes_by_ip_port.clear()
    self.block_timestamps.clear()
    cur = self.db_intf.get_cursor()
    # second cursor: used to DELETE duplicates while `cur` iterates the SELECT
    cur2 = self.db_intf.get_cursor()
    db_modified = False
    try:
        tm_start = time.time()
        db_correction_duration = 0.0
        log.debug("Reading masternode data from DB")
        cur.execute("SELECT id, ident, status, payee, last_paid_time, last_paid_block, IP, queue_position, "
                    "protx_hash from MASTERNODES where dmt_active=1")
        for row in cur.fetchall():
            db_id = row[0]
            ident = row[1]

            # correct duplicated masternodes issue
            mn_first = self.masternodes_by_ident.get(ident)
            if mn_first is not None:
                continue

            # delete duplicated (caused by breaking the app while loading)
            tm_start_1 = time.time()
            cur2.execute('DELETE from MASTERNODES where ident=? and id<>?', (ident, db_id))
            if cur2.rowcount > 0:
                db_modified = True
            db_correction_duration += (time.time() - tm_start_1)

            mn = Masternode()
            mn.db_id = db_id
            mn.ident = ident
            mn.status = row[2]
            mn.payee = row[3]
            mn.lastpaidtime = row[4]
            mn.lastpaidblock = row[5]
            mn.ip_port = row[6]
            mn.queue_position = row[7]
            mn.protx_hash = row[8]
            self.masternodes.append(mn)
            self.masternodes_by_ident[mn.ident] = mn
            self.masternodes_by_ip_port[mn.ip_port] = mn
        tm_diff = time.time() - tm_start
        log.info('DB read time of %d MASTERNODES: %s s, db fix time: %s' %
                 (len(self.masternodes), str(tm_diff), str(db_correction_duration)))

        log.debug("Reading protx data from DB")
        cur.execute("SELECT id, protx_hash, collateral_hash, collateral_index, collateral_address,"
                    "operator_reward, service, registered_height, last_paid_height, pose_penalty,"
                    "pose_revived_height, pose_ban_height, owner_address, voting_address, payout_address,"
                    "pubkey_operator, operator_payout_address from protx")
        for row in cur.fetchall():
            protx = MasternodeProtx()
            protx.db_id = row[0]
            protx.protx_hash = row[1]
            protx.collateral_hash = row[2]
            protx.collateral_index = row[3]
            protx.collateral_address = row[4]
            protx.operator_reward = row[5]
            protx.service = row[6]
            protx.registered_height = row[7]
            protx.last_paid_height = row[8]
            protx.pose_penalty = row[9]
            protx.pose_revived_height = row[10]
            protx.pose_ban_height = row[11]
            protx.owner_address = row[12]
            protx.voting_address = row[13]
            protx.payout_address = row[14]
            protx.pubkey_operator = row[15]
            protx.operator_payout_address = row[16]
            # loading from db must not mark the object as dirty
            protx.modified = False
            self.protx_by_hash[protx.protx_hash] = protx

        # assign protx objects to masternodes
        for mn in self.masternodes:
            protx = self.protx_by_hash.get(mn.protx_hash)
            if protx and mn.protx != protx:
                mn.protx = protx
        log.debug("Finished reading protx data from DB")
    except Exception as e:
        log.exception('SQLite initialization error')
    finally:
        if db_modified:
            self.db_intf.commit()
        # two cursors were acquired above, so release twice
        self.db_intf.release_cursor()
        self.db_intf.release_cursor()
def reload_configuration(self):
    """Called after modification of connections' configuration or changes having impact on the file name
    associated to database cache."""
    # Start from a clean state, then rebuild the connection list ordered by
    # use priority; reload the db cache only when at least one connection exists.
    self.disconnect()
    self.connections = self.app_config.get_ordered_conn_list()
    self.cur_conn_index = 0
    if not self.connections:
        self.cur_conn_def = None
    else:
        self.cur_conn_def = self.connections[0]
        self.load_data_from_db_cache()
def disconnect(self):
    """Tear down the SSH tunnel (if any) and mark the interface inactive."""
    if self.active:
        log.debug('Disconnecting')
        if self.ssh:
            self.ssh.disconnect()
            del self.ssh
            self.ssh = None
        self.active = False
        if self.on_connection_disconnected_callback:
            self.on_connection_disconnected_callback()

def mark_call_begin(self):
    # Remember the connection an RPC round started with; switch_to_next_config
    # stops rotating once it comes back around to this one.
    self.starting_conn = self.cur_conn_def

def switch_to_next_config(self):
    """
    If there is another dashd config not used recently, switch to it. Called only when there was a problem
    with current connection config.
    :return: True if successfully switched or False if there was no another config
    """
    if self.cur_conn_def:
        self.app_config.conn_cfg_failure(self.cur_conn_def)  # mark connection as defective
    # rotate circularly through the configured connections
    if self.cur_conn_index < len(self.connections)-1:
        idx = self.cur_conn_index + 1
    else:
        idx = 0

    conn = self.connections[idx]
    if conn != self.starting_conn and conn != self.cur_conn_def:
        log.debug("Trying to switch to another connection: %s" % conn.get_description())
        self.disconnect()
        self.cur_conn_index = idx
        self.cur_conn_def = conn
        if not self.open():
            # recurse until a working connection is found or we wrap around
            return self.switch_to_next_config()
        else:
            return True
    else:
        log.warning('Failed to connect: no another connection configurations.')
        return False

def mark_cur_conn_cfg_is_ok(self):
    # Report a successful use of the current config so its priority recovers.
    if self.cur_conn_def:
        self.app_config.conn_cfg_success(self.cur_conn_def)
def open(self):
    """
    Opens connection to dash RPC. If it fails, then the next enabled conn config will be used, if any exists.
    :return: True if successfully connected, False if user cancelled the operation. If all of the attempts
      fail, then appropriate exception will be raised.
    """
    try:
        if not self.cur_conn_def:
            raise Exception('There is no connections to Dash network enabled in the configuration.')

        while True:
            try:
                if self.open_internal():
                    break
                else:
                    if not self.switch_to_next_config():
                        return False
            except CancelException:
                # user cancelled (e.g. a password prompt)
                return False
            except (socket.gaierror, ConnectionRefusedError, TimeoutError, socket.timeout,
                    NoValidConnectionsError) as e:
                # exceptions raised by not likely functioning dashd node; try to switch to another node
                # if there is any in the config
                if not self.switch_to_next_config():
                    raise e  # couldn't use another conn config, raise exception
                else:
                    # switch_to_next_config() already opened the new connection
                    break
    except Exception as e:
        self.last_error_message = str(e)
        raise
    return True
def reset_connection(self):
    """
    Called when communication errors are detected while sending RPC commands. Here we are closing the SSH-tunnel
    (if used) and HTTP connection object to prepare for another try.
    :return:
    """
    if self.active:
        if self.http_conn:
            self.http_conn.close()
        if self.ssh:
            self.ssh.disconnect()
        # force open_internal() to rebuild everything on the next call
        self.active = False
def open_internal(self):
    """
    Try to establish connection to dash RPC daemon for current connection config.
    :return: True, if connection successfully establishes, False if user Cancels the operation (not always
      cancelling will be possible - only when user is prompted for a password).
    """
    if not self.active:
        log.info("Connecting to: %s" % self.cur_conn_def.get_description())
        try:
            # make the owner know, we are connecting
            if self.on_connection_initiated_callback:
                self.on_connection_initiated_callback()
        except:
            pass

        if self.cur_conn_def.use_ssh_tunnel:
            # RPC over SSH
            if self.ssh is None:
                self.ssh = DashdSSH(self.cur_conn_def.ssh_conn_cfg.host, self.cur_conn_def.ssh_conn_cfg.port,
                                    self.cur_conn_def.ssh_conn_cfg.username,
                                    auth_method=self.cur_conn_def.ssh_conn_cfg.auth_method,
                                    private_key_path=self.cur_conn_def.ssh_conn_cfg.private_key_path)
            try:
                log.debug('starting ssh.connect')
                self.ssh.connect()
                log.debug('finished ssh.connect')
            except Exception as e:
                log.error('error in ssh.connect')
                try:
                    # make the owner know, connection attempt failed
                    if self.on_connection_failed_callback:
                        self.on_connection_failed_callback()
                except:
                    log.exception('on_connection_try_fail_callback call exception')
                raise

            # configure SSH tunnel
            # get random local unprivileged port number to establish SSH tunnel
            success = False
            local_port = None
            for try_nr in range(1, 10):
                try:
                    log.debug(f'beginning ssh.open_tunnel, try: {try_nr}')
                    local_port = randint(2000, 50000)
                    self.ssh.open_tunnel(local_port,
                                         self.cur_conn_def.host,
                                         int(self.cur_conn_def.port))
                    success = True
                    break
                except Exception as e:
                    log.exception('error in ssh.open_tunnel loop: ' + str(e))
            log.debug('finished ssh.open_tunnel loop')
            if not success:
                log.error('finished ssh.open_tunnel loop with error')
                return False
            else:
                rpc_user = self.cur_conn_def.username
                rpc_password = self.cur_conn_def.password
                rpc_host = '127.0.0.1'  # SSH tunnel on loopback
                rpc_port = local_port
        else:
            # direct RPC
            rpc_host = self.cur_conn_def.host
            rpc_port = self.cur_conn_def.port
            rpc_user = self.cur_conn_def.username
            rpc_password = self.cur_conn_def.password

        if self.cur_conn_def.use_ssl:
            self.rpc_url = 'https://'
            self.http_conn = httplib.HTTPSConnection(rpc_host, rpc_port, timeout=5, context=ssl._create_unverified_context())
        else:
            self.rpc_url = 'http://'
            self.http_conn = httplib.HTTPConnection(rpc_host, rpc_port, timeout=5)

        # NOTE(review): the url embeds the rpc credentials and is written to the
        # debug log below — confirm this is acceptable for the log files produced.
        self.rpc_url += rpc_user + ':' + rpc_password + '@' + rpc_host + ':' + str(rpc_port)
        log.debug('AuthServiceProxy configured to: %s' % self.rpc_url)
        self.proxy = AuthServiceProxy(self.rpc_url, timeout=1000, connection=self.http_conn)

        try:
            # check the connection
            self.http_conn.connect()
            log.debug('Successfully connected AuthServiceProxy')

            try:
                # make the owner know, we successfully finished connection
                if self.on_connection_successful_callback:
                    self.on_connection_successful_callback()
            except:
                log.exception('on_connection_finished_callback call exception')
        except:
            log.exception('Connection failed')

            try:
                # make the owner know, connection attempt failed
                if self.on_connection_failed_callback:
                    self.on_connection_failed_callback()

                if self.ssh:
                    # if there is a ssh connection established earlier, disconnect it because apparently it isn't
                    # functioning
                    self.ssh.disconnect()
            except:
                log.exception('on_connection_try_fail_callback call exception')
            raise
        finally:
            log.debug('http_conn.close()')
            self.http_conn.close()
            # timeout has been initially set to 5 seconds to perform 'quick' connection test
            self.http_conn.timeout = 20

        self.active = True
    return self.active
def get_active_conn_description(self):
    """Human-readable description of the currently selected connection config."""
    if self.cur_conn_def:
        return self.cur_conn_def.get_description()
    else:
        return '???'

# --- thin wrappers around the node's RPC commands; @control_rpc_call adds
# --- retry/connection-switching behavior around each call

@control_rpc_call
def getblockcount(self):
    if self.open():
        return self.proxy.getblockcount()
    else:
        raise Exception('Not connected')

@control_rpc_call
def getblockchaininfo(self, verify_node: bool = True):
    if self.open():
        info = self.proxy.getblockchaininfo()
        if verify_node:
            # guard against pointing a mainnet config at a testnet node and vice versa
            node_under_testnet = (info.get('chain') == 'test')
            if self.app_config.is_testnet and not node_under_testnet:
                raise Exception('This RPC node works under Dash MAINNET, but your current configuration is '
                                'for TESTNET.')
            elif self.app_config.is_mainnet and node_under_testnet:
                raise Exception('This RPC node works under Dash TESTNET, but your current configuration is '
                                'for MAINNET.')
        return info
    else:
        raise Exception('Not connected')

@control_rpc_call
def getnetworkinfo(self):
    if self.open():
        info = self.proxy.getnetworkinfo()
        return info
    else:
        raise Exception('Not connected')

@control_rpc_call
def issynchronized(self):
    if self.open():
        try:
            syn = self.proxy.mnsync('status')
            return syn.get('IsSynced')
        except JSONRPCException as e:
            # NOTE(review): presumably a 403 means the node sits behind a proxy
            # that does not expose 'mnsync'; the node is then assumed synced
            if str(e).lower().find('403 forbidden') >= 0:
                self.http_conn.close()
                return True
            else:
                raise
    else:
        raise Exception('Not connected')

@control_rpc_call
def mnsync(self):
    if self.open():
        # if connecting to HTTP(S) proxy do not call this function - it will not be exposed
        if self.cur_conn_def.is_http_proxy():
            return {}
        else:
            return self.proxy.mnsync('status')
    else:
        raise Exception('Not connected')

@control_rpc_call
def masternodebroadcast(self, what, hexto):
    if self.open():
        return self.proxy.masternodebroadcast(what, hexto)
    else:
        raise Exception('Not connected')
def reset_masternode_data_cache(self):
    """Invalidate the cached read timestamps so the next get_masternodelist /
    _read_protx_list call fetches fresh data from the network."""
    for prefix in ('ProtxLastReadTime_', 'MasternodesLastReadTime_'):
        app_cache.set_value(prefix + self.app_config.dash_network, 0)
def _read_protx_list(self, data_max_age: int = PROTX_CACHE_VALID_SECONDS, feedback_fun: Optional[Callable] = None):
    """Return self.protx_by_hash, refreshing it from the network when the cached
    data is older than *data_max_age* seconds (0 forces a refresh)."""
    cache_item_name = 'ProtxLastReadTime_' + self.app_config.dash_network
    last_read_time = app_cache.get_value(cache_item_name, 0, int)

    if not self.protx_by_hash or data_max_age == 0 or (int(time.time()) - last_read_time) >= data_max_age:
        log.info('Fetching protx data from network')
        # clear markers; entries still unmarked after the refresh are purged below
        for protx_hash in self.protx_by_hash.keys():
            protx = self.protx_by_hash[protx_hash]
            protx.marker = False
            protx.modified = False

        # read protx list from the network:
        protx_list = self.proxy.protx('list', 'registered', True)
        app_cache.set_value(cache_item_name, int(time.time()))

        # update local cache in RAM
        for protx_json in protx_list:
            if feedback_fun:
                feedback_fun()
            protx_hash = protx_json.get('proTxHash')
            if protx_hash:
                protx = self.protx_by_hash.get(protx_hash)
                if not protx:
                    protx = MasternodeProtx()
                    self.protx_by_hash[protx_hash] = protx
                protx.copy_from_json(protx_json)
                protx.marker = True

        # update db cache:
        db_modified = False
        cur = None
        try:
            if self.db_intf.db_active:
                cur = self.db_intf.get_cursor()

                for protx_hash in self.protx_by_hash.keys():
                    protx = self.protx_by_hash[protx_hash]
                    if protx.db_id is None or protx.modified:
                        protx.update_in_db(cur)
                        db_modified = True

                # remove non existing protx entries
                protx_to_remove = []
                for protx_hash in self.protx_by_hash.keys():
                    protx = self.protx_by_hash[protx_hash]
                    if not protx.marker:
                        protx_to_remove.append(protx)
                for protx in protx_to_remove:
                    protx.delete_from_db(cur)
                    del self.protx_by_hash[protx.protx_hash]
        finally:
            if db_modified:
                self.db_intf.commit()
            if cur is not None:
                self.db_intf.release_cursor()
        log.info('Finished fetching protx data from network')
    return self.protx_by_hash
def _update_mn_queue_values(self, masternodes: List[Masternode]):
    """
    Updates masternode payment queue order values.
    """
    # First compute a sort key per enabled masternode (block of last payment,
    # registration height, or PoSe-revival height), then replace it with the
    # masternode's index in the sorted payment queue.
    payment_queue = []
    for mn in masternodes:
        if mn.status == 'ENABLED':
            protx = mn.protx
            if mn.lastpaidblock > 0:
                mn.queue_position = mn.lastpaidblock
            else:
                if protx:
                    mn.queue_position = protx.registered_height
                else:
                    mn.queue_position = None
            if protx:
                pose_revived_height = protx.pose_revived_height
                if pose_revived_height > 0 and pose_revived_height > mn.lastpaidblock:
                    mn.queue_position = pose_revived_height
            payment_queue.append(mn)
        else:
            mn.queue_position = None
    # NOTE(review): queue_position can still be None here (enabled mn without a
    # protx and lastpaidblock == 0, or registered_height None); sorting would
    # then raise TypeError on Python 3 — confirm that state is unreachable.
    payment_queue.sort(key=lambda x: x.queue_position, reverse=False)

    for mn in masternodes:
        if mn.status == 'ENABLED':
            mn.queue_position = payment_queue.index(mn)
@control_rpc_call
def get_masternodelist(self, *args, data_max_age=MASTERNODES_CACHE_VALID_SECONDS,
                       protx_data_max_age=PROTX_CACHE_VALID_SECONDS,
                       feedback_fun: Optional[Callable] = None) -> List[Masternode]:
    """
    Returns masternode list, read from the Dash network or from the internal cache.
    :param args: arguments passed to the 'masternodelist' RPC call
    :param data_max_age: maximum age (in seconds) of the cached masternode data to used; if the
      cache is older than 'data_max_age', then an RPC call is performed to load newer masternode data;
      value of 0 forces reading of the new data from the network
    :return: list of Masternode objects, matching the 'args' arguments
    """
    if self.open():
        # caching is implemented only for the plain 'masternodelist json' form
        if len(args) == 1 and args[0] == 'json':
            last_read_time = app_cache.get_value(f'MasternodesLastReadTime_{self.app_config.dash_network}', 0, int)

            if self.masternodes and data_max_age > 0 and int(time.time()) - last_read_time < data_max_age:
                return self.masternodes
            else:
                self._read_protx_list(protx_data_max_age, feedback_fun=feedback_fun)

                for mn in self.masternodes:
                    mn.marker = False  # mark to delete masternode existing in cache but no longer
                                       # existing on the network
                    mn.modified = False

                mns_json = self.proxy.masternodelist(*args)
                app_cache.set_value(f'MasternodesLastReadTime_{self.app_config.dash_network}', int(time.time()))

                for mn_id in mns_json.keys():
                    if feedback_fun:
                        feedback_fun()

                    mn_json = mns_json.get(mn_id)
                    mn = self.masternodes_by_ident.get(mn_id)
                    if not mn:
                        mn = Masternode()
                        mn.copy_from_json(mn_id, mn_json)
                        self.masternodes.append(mn)
                        self.masternodes_by_ident[mn_id] = mn
                        self.masternodes_by_ip_port[mn.ip_port] = mn
                    else:
                        mn.copy_from_json(mn_id, mn_json)
                    mn.marker = True

                    protx = self.protx_by_hash.get(mn.protx_hash)
                    if protx and mn.protx != protx:
                        mn.protx = protx

                self._update_mn_queue_values(self.masternodes)

                # save masternodes to the db cache
                db_modified = False
                cur = None
                try:
                    if self.db_intf.db_active:
                        cur = self.db_intf.get_cursor()

                        for mn in self.masternodes:
                            if feedback_fun:
                                feedback_fun()

                            if mn.db_id is None:
                                # Masternode entry not in db cache yet
                                if self.db_intf.db_active:
                                    cur.execute(
                                        "INSERT INTO MASTERNODES(ident, status, payee, "
                                        " last_paid_time, last_paid_block, ip, protx_hash, "
                                        " registered_height, dmt_active, dmt_create_time, queue_position) "
                                        "VALUES (?,?,?,?,?,?,?,?,?,?,?)",
                                        (mn.ident, mn.status, mn.payee,
                                         mn.lastpaidtime, mn.lastpaidblock, mn.ip_port, mn.protx_hash,
                                         mn.registered_height, 1, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                                         mn.queue_position))
                                    mn.db_id = cur.lastrowid
                                    db_modified = True
                            else:
                                if mn.modified:
                                    cur.execute(
                                        "UPDATE MASTERNODES set ident=?, status=?, payee=?, "
                                        "last_paid_time=?, last_paid_block=?, ip=?, protx_hash=?, "
                                        "registered_height=?, queue_position=? WHERE id=?",
                                        (mn.ident, mn.status, mn.payee,
                                         mn.lastpaidtime, mn.lastpaidblock, mn.ip_port, mn.protx_hash, mn.registered_height,
                                         mn.queue_position, mn.db_id))
                                    db_modified = True

                        # remove non existing masternodes from cache
                        for mn_index in reversed(range(len(self.masternodes))):
                            if feedback_fun:
                                feedback_fun()

                            mn = self.masternodes[mn_index]
                            if not mn.marker:
                                if self.db_intf.db_active:
                                    cur.execute("UPDATE MASTERNODES set dmt_active=0, dmt_deactivation_time=?"
                                                "WHERE ID=?",
                                                (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), mn.db_id))
                                    db_modified = True
                                # NOTE(review): the entry is not removed from
                                # masternodes_by_ip_port here — confirm whether the
                                # stale ip_port mapping is intentional
                                self.masternodes_by_ident.pop(mn.ident, 0)
                                del self.masternodes[mn_index]
                finally:
                    if db_modified:
                        self.db_intf.commit()
                    if cur is not None:
                        self.db_intf.release_cursor()

                return self.masternodes
        else:
            # non-cached variant: pass the raw RPC result through
            mns = self.proxy.masternodelist(*args)
            return mns
    else:
        raise Exception('Not connected')
# --- thin wrappers around the remaining RPC commands; each opens the
# --- connection on demand and raises when no connection can be established

@control_rpc_call
def getaddressbalance(self, addresses):
    if self.open():
        return self.proxy.getaddressbalance({'addresses': addresses})
    else:
        raise Exception('Not connected')

@control_rpc_call
def getaddressutxos(self, addresses):
    if self.open():
        return self.proxy.getaddressutxos({'addresses': addresses})
    else:
        raise Exception('Not connected')

@control_rpc_call
def getaddressmempool(self, addresses):
    if self.open():
        return self.proxy.getaddressmempool({'addresses': addresses})
    else:
        raise Exception('Not connected')

@control_rpc_call
def getrawmempool(self):
    if self.open():
        return self.proxy.getrawmempool()
    else:
        raise Exception('Not connected')

@control_rpc_call
def getrawtransaction(self, txid, verbose, skip_cache=False):
    def check_if_tx_confirmed(tx_json):
        # cached transaction will not be accepted if the transaction stored in cache file was not confirmed
        if tx_json.get('confirmations'):
            return True
        return False

    if self.open():
        tx_json = json_cache_wrapper(self.proxy.getrawtransaction, self, 'tx-' + str(verbose) + '-' + txid,
                                     skip_cache=skip_cache, accept_cache_data_fun=check_if_tx_confirmed)\
            (txid, verbose)
        return tx_json
    else:
        raise Exception('Not connected')

@control_rpc_call
def getblockhash(self, blockid, skip_cache=False):
    if self.open():
        return json_cache_wrapper(self.proxy.getblockhash, self, 'blockhash-' + str(blockid),
                                  skip_cache=skip_cache)(blockid)
    else:
        raise Exception('Not connected')

@control_rpc_call
def getblockheader(self, blockhash, skip_cache=False):
    if self.open():
        return json_cache_wrapper(self.proxy.getblockheader, self, 'blockheader-' + str(blockhash),
                                  skip_cache=skip_cache)(blockhash)
    else:
        raise Exception('Not connected')

@control_rpc_call
def validateaddress(self, address):
    if self.open():
        return self.proxy.validateaddress(address)
    else:
        raise Exception('Not connected')

@control_rpc_call
def decoderawtransaction(self, rawtx):
    if self.open():
        return self.proxy.decoderawtransaction(rawtx)
    else:
        raise Exception('Not connected')

@control_rpc_call
def sendrawtransaction(self, tx, use_instant_send):
    if self.open():
        # NOTE(review): the middle positional argument (False) is presumably
        # 'allowhighfees' — confirm against the node's RPC signature
        return self.proxy.sendrawtransaction(tx, False, use_instant_send)
    else:
        raise Exception('Not connected')

@control_rpc_call
def getcurrentvotes(self, hash):
    if self.open():
        return self.proxy.getcurrentvotes(hash)
    else:
        raise Exception('Not connected')

@control_rpc_call
def gobject(self, *args):
    if self.open():
        return self.proxy.gobject(*args)
    else:
        raise Exception('Not connected')

@control_rpc_call
def masternode(self, *args):
    if self.open():
        return self.proxy.masternode(*args)
    else:
        raise Exception('Not connected')

@control_rpc_call
def getgovernanceinfo(self):
    if self.open():
        return self.proxy.getgovernanceinfo()
    else:
        raise Exception('Not connected')

@control_rpc_call
def getsuperblockbudget(self, block_index):
    if self.open():
        return self.proxy.getsuperblockbudget(block_index)
    else:
        raise Exception('Not connected')

@control_rpc_call
def voteraw(self, masternode_tx_hash, masternode_tx_index, governance_hash, vote_signal, vote, sig_time, vote_sig):
    if self.open():
        return self.proxy.voteraw(masternode_tx_hash, masternode_tx_index, governance_hash, vote_signal, vote,
                                  sig_time, vote_sig)
    else:
        raise Exception('Not connected')

@control_rpc_call
def getaddressdeltas(self, *args):
    if self.open():
        return self.proxy.getaddressdeltas(*args)
    else:
        raise Exception('Not connected')

@control_rpc_call
def getaddresstxids(self, *args):
    if self.open():
        return self.proxy.getaddresstxids(*args)
    else:
        raise Exception('Not connected')

# NOTE(review): unlike the surrounding wrappers, this one is not decorated
# with @control_rpc_call — confirm whether that is intentional (it is also
# called from within _read_protx_list, which itself runs under the decorator).
def protx(self, *args):
    if self.open():
        return self.proxy.protx(*args)
    else:
        raise Exception('Not connected')

@control_rpc_call
def spork(self, *args):
    if self.open():
        return self.proxy.spork(*args)
    else:
        raise Exception('Not connected')

def rpc_call(self, encrypt_rpc_arguments: bool, allow_switching_conns: bool, command: str, *args):
    # generic entry point: wraps an arbitrary RPC command name in the same
    # control_rpc_call machinery used by the dedicated wrappers above
    def call_command(self, *args):
        c = self.proxy.__getattr__(command)
        return c(*args)

    if self.open():
        # give the dynamic wrapper the real command name (used by the decorator)
        call_command.__setattr__('__name__', command)
        fun = control_rpc_call(call_command, encrypt_rpc_arguments=encrypt_rpc_arguments,
                               allow_switching_conns=allow_switching_conns)
        c = fun(self, *args)
        return c
    else:
        raise Exception('Not connected')

@control_rpc_call
def listaddressbalances(self, minfee):
    if self.open():
        return self.proxy.listaddressbalances(minfee)
    else:
        raise Exception('Not connected')

@control_rpc_call
def checkfeaturesupport(self, feature_name: str, dmt_version: str, *args) -> Dict:
    if self.open():
        return self.proxy.checkfeaturesupport(feature_name, dmt_version)
    else:
        raise Exception('Not connected')
def get_block_timestamp(self, block: int):
    """Return the timestamp of *block*, memoizing results in self.block_timestamps."""
    cached = self.block_timestamps.get(block)
    if cached is not None:
        return cached
    header = self.getblockheader(self.getblockhash(block))
    timestamp = header['time']
    self.block_timestamps[block] = timestamp
    return timestamp
def fetch_mempool_txes(self, feedback_fun: Optional[Callable] = None):
    """Refresh self.mempool_txes to mirror the node's current mempool.

    Entries that left the mempool are dropped from the local map; entries not
    seen before are fetched with getrawtransaction (cache skipped, since
    mempool transactions are unconfirmed).
    """
    current = self.proxy.getrawmempool()

    # drop local entries no longer present in the node's mempool
    stale = [h for h in self.mempool_txes if h not in current]
    for tx_hash in stale:
        del self.mempool_txes[tx_hash]

    # fetch transactions we do not have yet
    for tx_hash in current:
        if feedback_fun:
            feedback_fun()
        if not self.mempool_txes.get(tx_hash):
            self.mempool_txes[tx_hash] = self.getrawtransaction(tx_hash, True, skip_cache=True)
def is_protx_update_pending(self, protx_hash: str, ip_port: Optional[str] = None) -> bool:
    """
    Check whether a protx transaction related to the proregtx passed as an argument exists in mempool.
    :param protx_hash: Hash of the ProRegTx transaction
    :param ip_port: optional "ip:port" string; when given, a mempool protx whose
        'service' field matches it is also treated as pending
    :return: True when a matching proUpRegTx/proUpRevTx/proUpServTx/proRegTx is in the mempool.
    """
    try:
        for tx in self.mempool_txes.values():
            # the payload key depends on the protx transaction type
            protx = tx.get('proUpRegTx') or tx.get('proUpRevTx') or tx.get('proUpServTx') \
                or tx.get('proRegTx')
            if not protx:
                continue
            # BUGFIX: the original condition was `protx and A or B`, which due
            # to and/or precedence evaluated B even when protx was None and
            # relied on the blanket except below to swallow the resulting
            # AttributeError; parenthesized explicitly.
            if protx.get('proTxHash') == protx_hash or (ip_port and protx.get('service') == ip_port):
                return True
        return False
    except Exception:
        # best-effort check: treat any malformed mempool entry as "not pending"
        return False
| mit |
Ruide/angr-dev | angr/angr/analyses/identifier/functions/atoi.py | 5 | 2123 |
import random
import string
from ..func import Func, TestData
class atoi(Func):
    """Behavioral identifier for atoi-like binary functions.

    Python 2 code (note xrange).  pre_test() probes the candidate function to
    refine two behavioral flags before the main identification run:
    whether it skips leading whitespace and whether it accepts negative numbers.
    """

    def __init__(self):
        super(atoi, self).__init__()
        # refined by pre_test() based on the candidate's observed behavior
        self.skips_whitespace = False
        self.allows_negative = True

    def rand_str(self, length, byte_list=None):  # pylint: disable=no-self-use
        # random string of `length` chars, optionally drawn only from byte_list
        if byte_list is None:
            return "".join(chr(random.randint(0, 255)) for _ in xrange(length))
        return "".join(random.choice(byte_list) for _ in xrange(length))

    def num_args(self):
        return 1

    def get_name(self):
        # name encodes the detected behavioral variant
        if self.allows_negative:
            suffix = ""
        else:
            suffix = "_no_signs"
        if self.skips_whitespace:
            return "atoi_whitespace_skip" + suffix
        return "atoi" + suffix

    def gen_input_output_pair(self):
        num = random.randint(-(2**26), 2**26-1)
        if not self.allows_negative:
            num = abs(num)
        s = str(num)
        test_input = [s]
        test_output = [s]
        return_val = num
        max_steps = 20
        return TestData(test_input, test_output, return_val, max_steps)

    def pre_test(self, func, runner):
        # 1) the candidate must parse a plain non-negative decimal number
        num = random.randint(-(2 ** 26), 2 ** 26 - 1)
        num = abs(num)
        s = str(num)
        test_input = [s]
        test_output = [s]
        return_val = num
        max_steps = 20
        test = TestData(test_input, test_output, return_val, max_steps)
        if not runner.test(func, test):
            return False

        # 2) probe: does it still parse when prefixed with whitespace?
        s = str(num)
        s = self.rand_str(10, string.whitespace) + s
        test_input = [s]
        test_output = [s]
        return_val = num
        max_steps = 20
        test = TestData(test_input, test_output, return_val, max_steps)
        self.skips_whitespace = runner.test(func, test)

        # 3) probe: does it handle negative numbers?
        num = -random.randint(2000, 8000)
        s = str(num)
        test_input = [s]
        test_output = [s]
        return_val = num
        max_steps = 20
        test = TestData(test_input, test_output, return_val, max_steps)
        if not runner.test(func, test):
            self.allows_negative = False
        return True
| bsd-2-clause |
hockeybuggy/twitter-sentiment | src/conductor.py | 1 | 3409 | #!/usr/bin/env python
# File : conductor.py
# Author : Douglas Anderson
# Description: Simple driver for sentiment analysis implementation
import os, sys
import tokenize
import normalize
import labelselect
import statsify
import wordselection
import dictizer
import split_dataset
from Token import Token
from parse_args import parse_args
from train import maxent_classifier
from train import maxent_classifier_with_validation
from train import naive_bayes_classifier
# NOTE: Python 2 script (print statements).  Pipeline: load tweets -> filter by
# label -> normalize tokens -> build feature dicts -> filter features ->
# split -> train (max-ent or naive bayes) -> evaluate.
if __name__ == "__main__":
    args = parse_args()

    print "Opening dataset..."
    tokens = tokenize.open_tweets_file("../data/b.tsv", 0, args.items)
    print "Selecting labels..."
    tokens = labelselect.__call__(tokens, args.labels)  # Select only the labels
    print "Normalizing dataset..."
    #tokens = normalize.__call__(tokens) # Normalize the tokens
    # each normalization pass is individually switchable from the command line
    if args.normalize and args.normalize_words:
        normalize.normalize_words(tokens)
    if args.normalize and args.normalize_punct:
        normalize.normalize_punct(tokens)
    if args.normalize and args.normalize_emoticons:
        normalize.normalize_emoticons(tokens)
    if args.normalize and args.normalize_users:
        normalize.normalize_users(tokens)
    if args.normalize and args.normalize_hashtags:
        normalize.normalize_hashtags(tokens)
    if args.normalize and args.normalize_nums:
        normalize.normalize_nums(tokens)
    if args.normalize and args.normalize_urls:
        normalize.normalize_urls(tokens)

    print "Transforming dataset..."
    feature_list = dictizer.__call__(tokens)
    docfreq = wordselection.calculate_docfreq(feature_list)
    if args.stopword_removal:
        print "Removing stopwords from the dataset..."
        feature_list = wordselection.remove_stopwords(feature_list)
    if args.uncommon_selection:
        print "Removing uncommon words from the dataset..."
        feature_list = wordselection.remove_uncommon(feature_list, docfreq, args.df_cutoff)
    wordselection.print_reatined_features(docfreq, args.df_cutoff)

    # Write the features out to a file
    with open("filtered_docs.txt", "w") as w:
        for row in feature_list:
            w.write(str(row[0]) + "\n")

    print "Generating feature set statistics..."
    statsify.__call__(feature_list, args.labels)

    print "Splitting the dataset..."
    # a validation split is only needed when a validation metric drives training
    if args.validation_metric == "none":
        train_set, _, test_set = split_dataset.__call__(feature_list, 0.2)
    else:
        train_set, validation_set, test_set = split_dataset.__call__(feature_list, 0.2, validation_size=0.2)

    if args.classifier_type == "max_ent":
        # exactly one stopping criterion must be provided for max-ent training
        if args.minlldelta:
            classifier = maxent_classifier(train_set, lldelta=args.minlldelta)
        elif args.minll:
            classifier = maxent_classifier(train_set, ll=args.minll)
        elif args.validation_metric != "none":
            classifier = maxent_classifier_with_validation(train_set, validation_set,
                                                           args.validation_metric, 3)
        elif args.numIterations:
            classifier = maxent_classifier(train_set, iterations=args.numIterations)
        else:
            print "Error no cut off set"
            sys.exit(0)
    else:
        classifier = naive_bayes_classifier(train_set)

    print "\nTesting"
    classifier.test(test_set, args.labels)
    classifier.show_informative_features(30)
    #classifier.inspect_errors(test_set)
| mit |
dannyboi104/SickRage | lib/dogpile/cache/util.py | 47 | 5679 | from hashlib import sha1
import inspect
import re
import collections
from . import compat
def coerce_string_conf(d):
    """Coerce string values in *d* to int / float / bool / None where they parse
    as such; other strings are kept, stripped of surrounding whitespace.
    Non-string values pass through unchanged."""
    int_pattern = re.compile(r'^[-+]?\d+$')
    float_pattern = re.compile(r'^[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?$')

    coerced = {}
    for key, value in d.items():
        if not isinstance(value, compat.string_types):
            coerced[key] = value
            continue
        stripped = value.strip()
        if int_pattern.match(stripped):
            coerced[key] = int(stripped)
        elif float_pattern.match(stripped):
            coerced[key] = float(stripped)
        elif stripped.lower() in ('false', 'true'):
            coerced[key] = stripped.lower() == 'true'
        elif stripped == 'None':
            coerced[key] = None
        else:
            coerced[key] = stripped
    return coerced
class PluginLoader(object):
    """Lazily load named implementations from a setuptools entry-point
    group, with support for explicitly registered overrides.
    """

    def __init__(self, group):
        self.group = group
        self.impls = {}

    def load(self, name):
        """Return the implementation registered or advertised as ``name``."""
        try:
            factory = self.impls[name]
        except KeyError:  # pragma NO COVERAGE
            import pkg_resources
            for impl in pkg_resources.iter_entry_points(
                    self.group, name):
                self.impls[name] = impl.load
                return impl.load()
            raise Exception(
                "Can't load plugin %s %s" %
                (self.group, name))
        else:
            return factory()

    def register(self, name, modulepath, objname):
        """Register ``objname`` living in ``modulepath`` under ``name``."""
        def load():
            module = __import__(modulepath, fromlist=[objname])
            return getattr(module, objname)
        self.impls[name] = load
def function_key_generator(namespace, fn, to_str=compat.string_type):
    """Return a function that generates a string
    key, based on a given function as well as
    arguments to the returned function itself.

    This is used by :meth:`.CacheRegion.cache_on_arguments`
    to generate a cache key from a decorated function.

    It can be replaced using the ``function_key_generator``
    argument passed to :func:`.make_region`.
    """
    if namespace is None:
        namespace = '%s:%s' % (fn.__module__, fn.__name__)
    else:
        namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)

    # bug fix: inspect.getargspec() was removed in Python 3.11; use
    # getfullargspec() when available and keep getargspec() for Python 2.
    # Element 0 of both results is the positional-argument name list.
    if hasattr(inspect, 'getfullargspec'):
        args = inspect.getfullargspec(fn)
    else:  # pragma NO COVERAGE - Python 2 only
        args = inspect.getargspec(fn)
    # strip a leading 'self'/'cls' so bound methods key only on real arguments
    has_self = args[0] and args[0][0] in ('self', 'cls')

    def generate_key(*args, **kw):
        if kw:
            raise ValueError(
                "dogpile.cache's default key creation "
                "function does not accept keyword arguments.")
        if has_self:
            args = args[1:]
        return namespace + "|" + " ".join(map(to_str, args))
    return generate_key
def function_multi_key_generator(namespace, fn, to_str=compat.string_type):
    """Like :func:`.function_key_generator`, but returns a generator that
    produces one key per positional argument instead of a single joined key.
    """
    if namespace is None:
        namespace = '%s:%s' % (fn.__module__, fn.__name__)
    else:
        namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)

    # bug fix: inspect.getargspec() was removed in Python 3.11; use
    # getfullargspec() when available and keep getargspec() for Python 2.
    if hasattr(inspect, 'getfullargspec'):
        args = inspect.getfullargspec(fn)
    else:  # pragma NO COVERAGE - Python 2 only
        args = inspect.getargspec(fn)
    # strip a leading 'self'/'cls' so bound methods key only on real arguments
    has_self = args[0] and args[0][0] in ('self', 'cls')

    def generate_keys(*args, **kw):
        if kw:
            raise ValueError(
                "dogpile.cache's default key creation "
                "function does not accept keyword arguments.")
        if has_self:
            args = args[1:]
        return [namespace + "|" + key for key in map(to_str, args)]
    return generate_keys
def sha1_mangle_key(key):
    """Mangle ``key`` into its hex-encoded SHA1 digest."""
    digest = sha1(key)
    return digest.hexdigest()
def length_conditional_mangler(length, mangler):
    """Return a key mangler that applies ``mangler`` only to keys whose
    length reaches the ``length`` threshold; shorter keys pass through.
    """
    def mangle(key):
        return mangler(key) if len(key) >= length else key
    return mangle
class memoized_property(object):
    """A read-only @property that is only evaluated once.

    The computed value is stored in the instance ``__dict__`` under the
    property's name, so every later attribute access bypasses this
    (non-data) descriptor entirely.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # class-level access returns the descriptor itself
            return self
        value = self.fget(obj)
        obj.__dict__[self.__name__] = value
        return value
def to_list(x, default=None):
    """Coerce ``x`` to a list.

    ``None`` maps to ``default``; lists and tuples are returned unchanged;
    any other value is wrapped in a single-element list.
    """
    if x is None:
        return default
    return x if isinstance(x, (list, tuple)) else [x]
class KeyReentrantMutex(object):
    """Wrap a single shared mutex so that the thread currently holding it
    may "acquire" again for additional, distinct key names without taking
    the underlying lock a second time.

    ``keys`` is a shared registry mapping a thread ident to the set of key
    names that thread currently holds; the real ``mutex`` is taken once per
    thread and released only when that thread's key set empties.
    """

    def __init__(self, key, mutex, keys):
        self.key = key        # the key name this wrapper instance guards
        self.mutex = mutex    # the shared underlying lock
        self.keys = keys      # shared {thread ident: set(key names)} registry

    @classmethod
    def factory(cls, mutex):
        """Return a ``fac(key)`` callable producing wrappers that all share
        ``mutex`` and one common key registry."""
        # this collection holds zero or one
        # thread idents as the key; a set of
        # keynames held as the value.
        keystore = collections.defaultdict(set)

        def fac(key):
            return KeyReentrantMutex(key, mutex, keystore)
        return fac

    def acquire(self, wait=True):
        """Acquire for ``self.key``; returns True on success, False when
        ``wait`` is falsy and the underlying mutex is contended."""
        current_thread = compat.threading.current_thread().ident
        keys = self.keys.get(current_thread)
        if keys is not None and \
                self.key not in keys:
            # current lockholder, new key. add it in
            keys.add(self.key)
            return True
        elif self.mutex.acquire(wait=wait):
            # after acquire, create new set and add our key
            self.keys[current_thread].add(self.key)
            return True
        else:
            return False

    def release(self):
        """Release ``self.key``; the underlying mutex is released only when
        this thread holds no more keys."""
        current_thread = compat.threading.current_thread().ident
        keys = self.keys.get(current_thread)
        assert keys is not None, "this thread didn't do the acquire"
        assert self.key in keys, "No acquire held for key '%s'" % self.key
        keys.remove(self.key)
        if not keys:
            # when list of keys empty, remove
            # the thread ident and unlock.
            del self.keys[current_thread]
            self.mutex.release()
| gpl-3.0 |
pawelmhm/scrapy | tests/test_webclient.py | 3 | 16648 | """
from twisted.internet import defer
Tests borrowed from the twisted.web.client tests.
"""
import os
import shutil
import sys
from pkg_resources import parse_version
import cryptography
import OpenSSL.SSL
from twisted.trial import unittest
from twisted.web import server, static, util, resource
from twisted.internet import reactor, defer
try:
from twisted.internet.testing import StringTransport
except ImportError:
# deprecated in Twisted 19.7.0
# (remove once we bump our requirement past that version)
from twisted.test.proto_helpers import StringTransport
from twisted.python.filepath import FilePath
from twisted.protocols.policies import WrappingFactory
from twisted.internet.defer import inlineCallbacks
from twisted.web.test.test_webclient import (
ForeverTakingResource,
ErrorResource,
NoLengthResource,
HostHeaderResource,
PayloadResource,
BrokenDownloadResource,
)
from scrapy.core.downloader import webclient as client
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
from scrapy.http import Request, Headers
from scrapy.settings import Settings
from scrapy.utils.misc import create_instance
from scrapy.utils.python import to_bytes, to_unicode
from tests.mockserver import ssl_context_factory
def getPage(url, contextFactory=None, response_transform=None, *args, **kwargs):
    """Adapted version of twisted.web.client.getPage

    Downloads *url* through Scrapy's ScrapyHTTPClientFactory and returns a
    Deferred.  By default the Deferred fires with the response body; when
    *response_transform* is given, it fires with that callable applied to
    the full response object instead.
    """
    def _clientfactory(url, *args, **kwargs):
        # factory callable handed to twisted's _makeGetterFactory; builds
        # the Scrapy HTTP client factory for a single request
        url = to_unicode(url)
        timeout = kwargs.pop('timeout', 0)
        f = client.ScrapyHTTPClientFactory(
            Request(url, *args, **kwargs), timeout=timeout)
        f.deferred.addCallback(response_transform or (lambda r: r.body))
        return f

    # imported here (not at module top) to mirror the upstream helper's usage
    from twisted.web.client import _makeGetterFactory
    return _makeGetterFactory(
        to_bytes(url), _clientfactory, contextFactory=contextFactory, *args, **kwargs
    ).deferred
class ParseUrlTestCase(unittest.TestCase):
    """Test URL parsing facility and defaults values."""

    def _parse(self, url):
        # Returns the (scheme, netloc, host, port, path) tuple the client
        # factory derives from a Request URL.
        f = client.ScrapyHTTPClientFactory(Request(url))
        return (f.scheme, f.netloc, f.host, f.port, f.path)

    def testParse(self):
        # Each entry pairs an input URL with the expected parse tuple;
        # fragments are dropped, default ports are filled in, empty paths
        # become '/', and trailing whitespace in the URL is tolerated.
        lip = '127.0.0.1'
        tests = (
            ("http://127.0.0.1?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
            ("http://127.0.0.1/?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
            ("http://127.0.0.1/foo?c=v&c2=v2#frag", ('http', lip, lip, 80, '/foo?c=v&c2=v2')),
            ("http://127.0.0.1:100?c=v&c2=v2#fragment", ('http', lip + ':100', lip, 100, '/?c=v&c2=v2')),
            ("http://127.0.0.1:100/?c=v&c2=v2#frag", ('http', lip + ':100', lip, 100, '/?c=v&c2=v2')),
            ("http://127.0.0.1:100/foo?c=v&c2=v2#frag", ('http', lip + ':100', lip, 100, '/foo?c=v&c2=v2')),
            ("http://127.0.0.1", ('http', lip, lip, 80, '/')),
            ("http://127.0.0.1/", ('http', lip, lip, 80, '/')),
            ("http://127.0.0.1/foo", ('http', lip, lip, 80, '/foo')),
            ("http://127.0.0.1?param=value", ('http', lip, lip, 80, '/?param=value')),
            ("http://127.0.0.1/?param=value", ('http', lip, lip, 80, '/?param=value')),
            ("http://127.0.0.1:12345/foo", ('http', lip + ':12345', lip, 12345, '/foo')),
            ("http://spam:12345/foo", ('http', 'spam:12345', 'spam', 12345, '/foo')),
            ("http://spam.test.org/foo", ('http', 'spam.test.org', 'spam.test.org', 80, '/foo')),
            ("https://127.0.0.1/foo", ('https', lip, lip, 443, '/foo')),
            ("https://127.0.0.1/?param=value", ('https', lip, lip, 443, '/?param=value')),
            ("https://127.0.0.1:12345/", ('https', lip + ':12345', lip, 12345, '/')),
            ("http://scrapytest.org/foo ", ('http', 'scrapytest.org', 'scrapytest.org', 80, '/foo')),
            ("http://egg:7890 ", ('http', 'egg:7890', 'egg', 7890, '/')),
        )
        for url, test in tests:
            # expected values are byte strings except the integer port
            test = tuple(
                to_bytes(x) if not isinstance(x, int) else x for x in test)
            self.assertEqual(client._parse(url), test, url)
class ScrapyHTTPPageGetterTests(unittest.TestCase):
    # White-box checks on the raw HTTP/1.0 request bytes produced by
    # ScrapyHTTPClientFactory + ScrapyHTTPPageGetter, without any network.

    def test_earlyHeaders(self):
        # basic test stolen from twisted HTTPageGetter
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar',
            body="some data",
            headers={
                'Host': 'example.net',
                'User-Agent': 'fooble',
                'Cookie': 'blah blah',
                'Content-Length': '12981',
                'Useful': 'value'}))
        self._test(
            factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Content-Length: 9\r\n"
            b"Useful: value\r\n"
            b"Connection: close\r\n"
            b"User-Agent: fooble\r\n"
            b"Host: example.net\r\n"
            b"Cookie: blah blah\r\n"
            b"\r\n"
            b"some data")
        # test minimal sent headers
        factory = client.ScrapyHTTPClientFactory(Request('http://foo/bar'))
        self._test(
            factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"\r\n")
        # test a simple POST with body and content-type
        factory = client.ScrapyHTTPClientFactory(Request(
            method='POST',
            url='http://foo/bar',
            body='name=value',
            headers={'Content-Type': 'application/x-www-form-urlencoded'}))
        self._test(
            factory,
            b"POST /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"Connection: close\r\n"
            b"Content-Type: application/x-www-form-urlencoded\r\n"
            b"Content-Length: 10\r\n"
            b"\r\n"
            b"name=value")
        # test a POST method with no body provided
        factory = client.ScrapyHTTPClientFactory(Request(
            method='POST',
            url='http://foo/bar'
        ))
        self._test(
            factory,
            b"POST /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"Content-Length: 0\r\n"
            b"\r\n")
        # test with single and multivalued headers
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar',
            headers={
                'X-Meta-Single': 'single',
                'X-Meta-Multivalued': ['value1', 'value2'],
            },
        ))
        self._test(
            factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"X-Meta-Multivalued: value1\r\n"
            b"X-Meta-Multivalued: value2\r\n"
            b"X-Meta-Single: single\r\n"
            b"\r\n")
        # same test with single and multivalued headers but using Headers class
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar',
            headers=Headers({
                'X-Meta-Single': 'single',
                'X-Meta-Multivalued': ['value1', 'value2'],
            }),
        ))
        self._test(
            factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"X-Meta-Multivalued: value1\r\n"
            b"X-Meta-Multivalued: value2\r\n"
            b"X-Meta-Single: single\r\n"
            b"\r\n")

    def _test(self, factory, testvalue):
        # Drive the protocol against an in-memory transport and compare the
        # emitted request line-by-line; set() makes the comparison
        # insensitive to header ordering.
        transport = StringTransport()
        protocol = client.ScrapyHTTPPageGetter()
        protocol.factory = factory
        protocol.makeConnection(transport)
        self.assertEqual(
            set(transport.value().splitlines()),
            set(testvalue.splitlines()))
        return testvalue

    def test_non_standard_line_endings(self):
        # regression test for: http://dev.scrapy.org/ticket/258
        # (servers terminating header lines with bare "\n" instead of "\r\n")
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar'))
        protocol = client.ScrapyHTTPPageGetter()
        protocol.factory = factory
        protocol.headers = Headers()
        protocol.dataReceived(b"HTTP/1.0 200 OK\n")
        protocol.dataReceived(b"Hello: World\n")
        protocol.dataReceived(b"Foo: Bar\n")
        protocol.dataReceived(b"\n")
        self.assertEqual(protocol.headers, Headers({'Hello': ['World'], 'Foo': ['Bar']}))
class EncodingResource(resource.Resource):
    # Test resource: echoes the request body back re-encoded as cp1251,
    # advertising that encoding in the Content-Encoding response header.
    out_encoding = 'cp1251'

    def render(self, request):
        body = to_unicode(request.content.read())
        request.setHeader(b'content-encoding', self.out_encoding)
        return body.encode(self.out_encoding)
class WebClientTestCase(unittest.TestCase):
    # End-to-end tests: serve a temp directory over a local twisted.web site
    # on an ephemeral port and exercise getPage() against it.

    def _listen(self, site):
        return reactor.listenTCP(0, site, interface="127.0.0.1")

    def setUp(self):
        # One static file plus special-purpose resources borrowed from
        # twisted's own webclient tests (hangs, errors, broken downloads...).
        self.tmpname = self.mktemp()
        os.mkdir(self.tmpname)
        FilePath(self.tmpname).child("file").setContent(b"0123456789")
        r = static.File(self.tmpname)
        r.putChild(b"redirect", util.Redirect(b"/file"))
        r.putChild(b"wait", ForeverTakingResource())
        r.putChild(b"error", ErrorResource())
        r.putChild(b"nolength", NoLengthResource())
        r.putChild(b"host", HostHeaderResource())
        r.putChild(b"payload", PayloadResource())
        r.putChild(b"broken", BrokenDownloadResource())
        r.putChild(b"encoding", EncodingResource())
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.port = self._listen(self.wrapper)
        self.portno = self.port.getHost().port

    @inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        shutil.rmtree(self.tmpname)

    def getURL(self, path):
        # URL for *path* on the server started in setUp()
        return f"http://127.0.0.1:{self.portno}/{path}"

    def testPayload(self):
        s = "0123456789" * 10
        return getPage(self.getURL("payload"), body=s).addCallback(
            self.assertEqual, to_bytes(s))

    def testHostHeader(self):
        # if we pass Host header explicitly, it should be used, otherwise
        # it should extract from url
        return defer.gatherResults([
            getPage(self.getURL("host")).addCallback(
                self.assertEqual, to_bytes(f"127.0.0.1:{self.portno}")),
            getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(
                self.assertEqual, to_bytes("www.example.com"))])

    def test_getPage(self):
        """
        L{client.getPage} returns a L{Deferred} which is called back with
        the body of the response if the default method B{GET} is used.
        """
        d = getPage(self.getURL("file"))
        d.addCallback(self.assertEqual, b"0123456789")
        return d

    def test_getPageHead(self):
        """
        L{client.getPage} returns a L{Deferred} which is called back with
        the empty string if the method is C{HEAD} and there is a successful
        response code.
        """
        def _getPage(method):
            return getPage(self.getURL("file"), method=method)
        return defer.gatherResults([
            _getPage("head").addCallback(self.assertEqual, b""),
            _getPage("HEAD").addCallback(self.assertEqual, b"")])

    def test_timeoutNotTriggering(self):
        """
        When a non-zero timeout is passed to L{getPage} and the page is
        retrieved before the timeout period elapses, the L{Deferred} is
        called back with the contents of the page.
        """
        d = getPage(self.getURL("host"), timeout=100)
        d.addCallback(
            self.assertEqual, to_bytes(f"127.0.0.1:{self.portno}"))
        return d

    def test_timeoutTriggering(self):
        """
        When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request. the
        L{Deferred} is errbacked with a L{error.TimeoutError}.
        """
        finished = self.assertFailure(
            getPage(self.getURL("wait"), timeout=0.000001),
            defer.TimeoutError)

        def cleanup(passthrough):
            # Clean up the server which is hanging around not doing
            # anything.
            connected = list(self.wrapper.protocols.keys())
            # There might be nothing here if the server managed to already see
            # that the connection was lost.
            if connected:
                connected[0].transport.loseConnection()
            return passthrough
        finished.addBoth(cleanup)
        return finished

    def testNotFound(self):
        return getPage(self.getURL('notsuchfile')).addCallback(self._cbNoSuchFile)

    def _cbNoSuchFile(self, pageData):
        self.assertIn(b'404 - No Such Resource', pageData)

    def testFactoryInfo(self):
        # status/version/message/response_headers must be populated on the
        # factory after a successful fetch
        url = self.getURL('file')
        _, _, host, port, _ = client._parse(url)
        factory = client.ScrapyHTTPClientFactory(Request(url))
        reactor.connectTCP(to_unicode(host), port, factory)
        return factory.deferred.addCallback(self._cbFactoryInfo, factory)

    def _cbFactoryInfo(self, ignoredResult, factory):
        self.assertEqual(factory.status, b'200')
        self.assertTrue(factory.version.startswith(b'HTTP/'))
        self.assertEqual(factory.message, b'OK')
        self.assertEqual(factory.response_headers[b'content-length'], b'10')

    def testRedirect(self):
        # the redirect resource serves twisted's HTML refresh page; getPage
        # does not follow it, so we receive that page verbatim
        return getPage(self.getURL("redirect")).addCallback(self._cbRedirect)

    def _cbRedirect(self, pageData):
        self.assertEqual(
            pageData,
            b'\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n'
            b' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n '
            b'<a href="/file">click here</a>\n </body>\n</html>\n')

    def test_encoding(self):
        """ Test that non-standard body encoding matches
        Content-Encoding header """
        body = b'\xd0\x81\xd1\x8e\xd0\xaf'
        dfd = getPage(self.getURL('encoding'), body=body, response_transform=lambda r: r)
        return dfd.addCallback(self._check_Encoding, body)

    def _check_Encoding(self, response, original_body):
        content_encoding = to_unicode(response.headers[b'Content-Encoding'])
        self.assertEqual(content_encoding, EncodingResource.out_encoding)
        self.assertEqual(
            response.body.decode(content_encoding), to_unicode(original_body))
class WebClientSSLTestCase(unittest.TestCase):
    # Same end-to-end flavour as WebClientTestCase but over TLS, using the
    # context factory configured on the class (subclasses override it).
    context_factory = None

    def _listen(self, site):
        return reactor.listenSSL(
            0, site,
            contextFactory=self.context_factory or ssl_context_factory(),
            interface="127.0.0.1")

    def getURL(self, path):
        return f"https://127.0.0.1:{self.portno}/{path}"

    def setUp(self):
        self.tmpname = self.mktemp()
        os.mkdir(self.tmpname)
        FilePath(self.tmpname).child("file").setContent(b"0123456789")
        r = static.File(self.tmpname)
        r.putChild(b"payload", PayloadResource())
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.port = self._listen(self.wrapper)
        self.portno = self.port.getHost().port

    @inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        shutil.rmtree(self.tmpname)

    def testPayload(self):
        s = "0123456789" * 10
        return getPage(self.getURL("payload"), body=s).addCallback(
            self.assertEqual, to_bytes(s))
class WebClientCustomCiphersSSLTestCase(WebClientSSLTestCase):
    # we try to use a cipher that is not enabled by default in OpenSSL
    custom_ciphers = 'CAMELLIA256-SHA'
    context_factory = ssl_context_factory(cipher_string=custom_ciphers)

    def testPayload(self):
        # client restricted to the same cipher the server offers: must succeed
        s = "0123456789" * 10
        settings = Settings({'DOWNLOADER_CLIENT_TLS_CIPHERS': self.custom_ciphers})
        client_context_factory = create_instance(ScrapyClientContextFactory, settings=settings, crawler=None)
        return getPage(
            self.getURL("payload"), body=s, contextFactory=client_context_factory
        ).addCallback(self.assertEqual, to_bytes(s))

    def testPayloadDisabledCipher(self):
        # client restricted to a cipher the server does not offer: the TLS
        # handshake must fail with an OpenSSL error
        if sys.implementation.name == "pypy" and parse_version(cryptography.__version__) <= parse_version("2.3.1"):
            self.skipTest("This test expects a failure, but the code does work in PyPy with cryptography<=2.3.1")
        s = "0123456789" * 10
        settings = Settings({'DOWNLOADER_CLIENT_TLS_CIPHERS': 'ECDHE-RSA-AES256-GCM-SHA384'})
        client_context_factory = create_instance(ScrapyClientContextFactory, settings=settings, crawler=None)
        d = getPage(self.getURL("payload"), body=s, contextFactory=client_context_factory)
        return self.assertFailure(d, OpenSSL.SSL.Error)
| bsd-3-clause |
srippa/nn_deep | assignment1/cs231n/classifiers/softmax.py | 3 | 2307 | import numpy as np
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)
    Inputs:
    - W: C x D array of weights
    - X: D x N array of data. Data are D-dimensional columns
    - y: 1-dimensional array of length N with labels 0...K-1, for K classes
    - reg: (float) regularization strength
    Returns:
    a tuple of:
    - loss as single float
    - gradient with respect to weights W, an array of same size as W
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    num_classes = W.shape[0]
    num_train = X.shape[1]
    for i in range(num_train):
        sample = X[:, i]                      # one D-dimensional column
        scores = W.dot(sample)                # unnormalized log-probabilities, shape (C,)
        scores -= np.max(scores)              # shift by the max for numeric stability
        exp_scores = np.exp(scores)
        probs = exp_scores / np.sum(exp_scores)
        loss += -np.log(probs[y[i]])          # cross-entropy of the true class
        for c in range(num_classes):
            # d(loss_i)/dW[c] = (p_c - 1{c == y_i}) * x_i
            indicator = 1.0 if c == y[i] else 0.0
            dW[c, :] += (probs[c] - indicator) * sample

    # Average over the batch and add L2 regularization.  The penalty is
    # 0.5 * reg * ||W||^2 so that its gradient is simply reg * W.
    loss /= num_train
    dW /= num_train
    loss += 0.5 * reg * np.sum(W * W)
    dW += reg * W
    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.
    Inputs and outputs are the same as softmax_loss_naive.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    num_train = X.shape[1]
    scores = W.dot(X)                                     # (C, N) class scores
    scores -= np.max(scores, axis=0, keepdims=True)       # column-wise shift for stability
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=0, keepdims=True)

    # Mean cross-entropy of the true classes plus L2 penalty
    # (0.5 * reg * ||W||^2, so the penalty gradient is reg * W).
    correct_probs = probs[y, np.arange(num_train)]
    loss = -np.sum(np.log(correct_probs)) / num_train
    loss += 0.5 * reg * np.sum(W * W)

    # dL/dscores = probs - one_hot(y); back-propagate through scores = W X.
    dscores = probs.copy()
    dscores[y, np.arange(num_train)] -= 1.0
    dW = dscores.dot(X.T) / num_train
    dW += reg * W
    return loss, dW
| mit |
opennode/waldur-mastermind | src/waldur_slurm/handlers.py | 1 | 2767 | import functools
from django.conf import settings
from django.db import transaction
from django.db.models import Sum
from waldur_core.core import utils as core_utils
from waldur_freeipa import models as freeipa_models
from . import models, tasks, utils
def if_plugin_enabled(f):
    """Calls decorated handler only if plugin is enabled."""

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if not settings.WALDUR_SLURM['ENABLED']:
            return None
        return f(*args, **kwargs)

    return wrapped
@if_plugin_enabled
def process_user_creation(sender, instance, created=False, **kwargs):
    """Schedule SLURM account creation for a freshly created user."""
    if created:
        def enqueue():
            # serialized at commit time, same as the original lambda
            tasks.add_user.delay(core_utils.serialize_instance(instance))

        transaction.on_commit(enqueue)
@if_plugin_enabled
def process_user_deletion(sender, instance, **kwargs):
    """Schedule removal of the SLURM account for a deleted user."""
    def enqueue():
        # serialized at commit time, same as the original lambda
        tasks.delete_user.delay(core_utils.serialize_instance(instance))

    transaction.on_commit(enqueue)
@if_plugin_enabled
def process_role_granted(sender, structure, user, role, **kwargs):
    """Notify SLURM when a user gains a role; ignored for users without a
    FreeIPA profile."""
    try:
        freeipa_profile = freeipa_models.Profile.objects.get(user=user)
        serialized_profile = core_utils.serialize_instance(freeipa_profile)
        serialized_structure = core_utils.serialize_instance(structure)
    except freeipa_models.Profile.DoesNotExist:
        return

    transaction.on_commit(
        lambda: tasks.process_role_granted.delay(
            serialized_profile, serialized_structure
        )
    )
@if_plugin_enabled
def process_role_revoked(sender, structure, user, role, **kwargs):
    """Notify SLURM when a user loses a role; ignored for users without a
    FreeIPA profile."""
    try:
        freeipa_profile = freeipa_models.Profile.objects.get(user=user)
        serialized_profile = core_utils.serialize_instance(freeipa_profile)
        serialized_structure = core_utils.serialize_instance(structure)
    except freeipa_models.Profile.DoesNotExist:
        return

    transaction.on_commit(
        lambda: tasks.process_role_revoked.delay(
            serialized_profile, serialized_structure
        )
    )
@if_plugin_enabled
def update_quotas_on_allocation_usage_update(sender, instance, created=False, **kwargs):
    """Propagate allocation usage changes to project and customer quotas."""
    allocation = instance
    # nothing to propagate for brand-new rows or unchanged usage
    if created or not allocation.usage_changed():
        return
    project = allocation.project
    for scope, path in (
        (project, models.Allocation.Permissions.project_path),
        (project.customer, models.Allocation.Permissions.customer_path),
    ):
        update_quotas(scope, path)
def update_quotas(scope, path):
    """Recompute aggregate quota usage for ``scope`` across its allocations."""
    queryset = models.Allocation.objects.filter(**{path: scope}).values(path)
    # annotate all usage fields in one call instead of chaining per field
    aggregates = {'total_%s' % field: Sum(field) for field in utils.FIELD_NAMES}
    totals = list(queryset.annotate(**aggregates))[0]
    for field in utils.FIELD_NAMES:
        scope.set_quota_usage(utils.MAPPING[field], totals['total_%s' % field])
| mit |
chris-chambers/llvm | test/CodeGen/SystemZ/Large/spill-01.py | 23 | 1245 | # Test cases where MVC is used for spill slots that end up being out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# There are 8 usable call-saved GPRs, two of which are needed for the base
# registers. The first 160 bytes of the frame are needed for the ABI
# call frame, and a further 8 bytes are needed for the emergency spill slot.
# That means we will have at least one out-of-range slot if:
#
# count == (4096 - 168) / 8 + 6 + 1 == 498
#
# Add in some extra room and check both %r15+4096 (the first out-of-range slot)
# and %r15+4104.
#
# CHECK: f1:
# CHECK: lay [[REG:%r[0-5]]], 4096(%r15)
# CHECK: mvc 0(8,[[REG]]), {{[0-9]+}}({{%r[0-9]+}})
# CHECK: brasl %r14, foo@PLT
# CHECK: lay [[REG:%r[0-5]]], 4096(%r15)
# CHECK: mvc {{[0-9]+}}(8,{{%r[0-9]+}}), 8([[REG]])
# CHECK: br %r14
count = 500

# Emit the LLVM IR test body on stdout.  Fixed to use print() calls and
# floor division so the generator behaves identically under Python 2 and
# Python 3 (the original `print` statements and true `/` were Py2-only;
# under Py3 `i / 2` would have produced floats in the IR).
print('declare void @foo()')
print('')
print('define void @f1(i64 *%base0, i64 *%base1) {')
for i in range(count):
    print(' %%ptr%d = getelementptr i64 *%%base%d, i64 %d' % (i, i % 2, i // 2))
    print(' %%val%d = load i64 *%%ptr%d' % (i, i))
print('')
print(' call void @foo()')
print('')
for i in range(count):
    print(' store i64 %%val%d, i64 *%%ptr%d' % (i, i))
print('')
print(' ret void')
print('}')
| gpl-3.0 |
LuqmanSahaf/kubernetes | third_party/htpasswd/htpasswd.py | 897 | 5219 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2013 Edgewall Software
# Copyright (C) 2008 Eli Carter
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
"""Replacement for htpasswd"""
import os
import sys
import random
from optparse import OptionParser
# We need a crypt module, but Windows doesn't have one by default. Try to find
# one, and tell the user if we can't.
try:
import crypt
except ImportError:
try:
import fcrypt as crypt
except ImportError:
sys.stderr.write("Cannot find a crypt module. "
"Possibly http://carey.geek.nz/code/python-fcrypt/\n")
sys.exit(1)
def wait_for_file_mtime_change(filename):
    """This function is typically called before a file save operation,
    waiting if necessary for the file modification time to change. The
    purpose is to avoid successive file updates going undetected by the
    caching mechanism that depends on a change in the file modification
    time to know when the file should be reparsed."""
    # bug fix: `time` was used below but never imported at module level,
    # so time.sleep() raised NameError (which `except OSError` does not
    # catch).  Import it locally to keep the module's import block intact.
    import time
    try:
        mtime = os.stat(filename).st_mtime
        os.utime(filename, None)
        while mtime == os.stat(filename).st_mtime:
            time.sleep(1e-3)
            os.utime(filename, None)
    except OSError:
        pass  # file doesn't exist (yet)
def salt():
    """Return a string of 2 random letters from the crypt salt alphabet."""
    alphabet = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789/.')
    return ''.join(random.choice(alphabet) for _ in range(2))
class HtpasswdFile:
    """A class for manipulating htpasswd files."""

    def __init__(self, filename, create=False):
        """Load ``filename``, or start an empty file when ``create`` is true.

        Raises an Exception when ``create`` is false and the file is missing.
        """
        self.entries = []
        self.filename = filename
        if not create:
            if os.path.exists(self.filename):
                self.load()
            else:
                raise Exception("%s does not exist" % self.filename)

    def load(self):
        """Read the htpasswd file into memory."""
        # bug fix: the file object was previously opened without ever being
        # closed; a context manager releases the handle deterministically.
        with open(self.filename, 'r') as f:
            lines = f.readlines()
        self.entries = []
        for line in lines:
            # each line is "username:pwhash"
            username, pwhash = line.split(':')
            entry = [username, pwhash.rstrip()]
            self.entries.append(entry)

    def save(self):
        """Write the htpasswd file to disk"""
        wait_for_file_mtime_change(self.filename)
        # bug fix: close the file deterministically instead of relying on
        # garbage collection of the open file object.
        with open(self.filename, 'w') as f:
            f.writelines(["%s:%s\n" % (entry[0], entry[1])
                          for entry in self.entries])

    def update(self, username, password):
        """Replace the entry for the given user, or add it if new."""
        # crypt(3) hash with a fresh two-character salt
        pwhash = crypt.crypt(password, salt())
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = pwhash
        else:
            self.entries.append([username, pwhash])

    def delete(self, username):
        """Remove the entry for the given user."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]
def main():
    """
    %prog -b[c] filename username password
    %prog -D filename username"""
    # For now, we only care about the use cases that affect tests/functional.py
    parser = OptionParser(usage=main.__doc__)
    parser.add_option('-b', action='store_true', dest='batch', default=False,
                      help='Batch mode; password is passed on the command line IN THE CLEAR.'
                      )
    parser.add_option('-c', action='store_true', dest='create', default=False,
                      help='Create a new htpasswd file, overwriting any existing file.')
    parser.add_option('-D', action='store_true', dest='delete_user',
                      default=False, help='Remove the given user from the password file.')
    options, args = parser.parse_args()

    def syntax_error(msg):
        """Utility function for displaying fatal error messages with usage
        help.
        """
        sys.stderr.write("Syntax error: " + msg)
        sys.stderr.write(parser.get_usage())
        sys.exit(1)

    # only -b (batch update) and -D (delete) are implemented; anything else
    # is rejected up front
    if not (options.batch or options.delete_user):
        syntax_error("Only batch and delete modes are supported\n")

    # Non-option arguments
    if len(args) < 2:
        syntax_error("Insufficient number of arguments.\n")
    filename, username = args[:2]
    # delete takes exactly (filename, username); update additionally needs
    # the clear-text password as a third argument
    if options.delete_user:
        if len(args) != 2:
            syntax_error("Incorrect number of arguments.\n")
        password = None
    else:
        if len(args) != 3:
            syntax_error("Incorrect number of arguments.\n")
        password = args[2]

    passwdfile = HtpasswdFile(filename, create=options.create)

    if options.delete_user:
        passwdfile.delete(username)
    else:
        passwdfile.update(username, password)

    passwdfile.save()


if __name__ == '__main__':
    main()
Flyingfox646/flyingfox | src/stats/migrations/0008_auto_20151119_2230.py | 2 | 2062 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration (2015-11-19): widens the choice
    # list on Object.cls, makes Object.is_playable non-editable, and adds a
    # default + db index to Player.type.  Do not edit the operations by hand.

    dependencies = [
        ('stats', '0007_object_is_playable'),
    ]

    operations = [
        migrations.AlterField(
            model_name='object',
            name='cls',
            field=models.CharField(max_length=24, choices=[('aaa_heavy', 'aaa_heavy'), ('aaa_light', 'aaa_light'), ('aaa_mg', 'aaa_mg'), ('aircraft_gunner', 'aircraft_gunner'), ('aircraft_heavy', 'aircraft_heavy'), ('aircraft_light', 'aircraft_light'), ('aircraft_medium', 'aircraft_medium'), ('aircraft_pilot', 'aircraft_pilot'), ('aircraft_static', 'aircraft_static'), ('aircraft_transport', 'aircraft_transport'), ('aircraft_turret', 'aircraft_turret'), ('armoured_vehicle', 'armoured_vehicle'), ('artillery_field', 'artillery_field'), ('artillery_howitzer', 'artillery_howitzer'), ('artillery_rocket', 'artillery_rocket'), ('block', 'block'), ('bomb', 'bomb'), ('bullet', 'bullet'), ('car', 'car'), ('driver', 'driver'), ('explosion', 'explosion'), ('locomotive', 'locomotive'), ('machine_gunner', 'machine_gunner'), ('parachute', 'parachute'), ('rocket', 'rocket'), ('searchlight', 'searchlight'), ('ship', 'ship'), ('shell', 'shell'), ('tank_heavy', 'tank_heavy'), ('tank_light', 'tank_light'), ('tank_medium', 'tank_medium'), ('tank_driver', 'tank_driver'), ('tank_turret', 'tank_turret'), ('trash', 'trash'), ('truck', 'truck'), ('vehicle_crew', 'vehicle_crew'), ('vehicle_static', 'vehicle_static'), ('vehicle_turret', 'vehicle_turret'), ('wagon', 'wagon')], blank=True),
        ),
        migrations.AlterField(
            model_name='object',
            name='is_playable',
            field=models.BooleanField(editable=False, default=False),
        ),
        migrations.AlterField(
            model_name='player',
            name='type',
            field=models.CharField(max_length=8, choices=[('pilot', 'pilot'), ('gunner', 'gunner'), ('tankman', 'tankman')], db_index=True, default='pilot'),
        ),
    ]
| mit |
hwuiwon/namebench | nb_third_party/dns/tsig.py | 215 | 7851 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TSIG support."""
import hmac
import struct
import dns.exception
import dns.rdataclass
import dns.name
class BadTime(dns.exception.DNSException):
    """Raised if the current time is not within the TSIG's validity time."""
class BadSignature(dns.exception.DNSException):
    """Raised if the TSIG signature fails to verify."""
class PeerError(dns.exception.DNSException):
    """Base class for all TSIG errors generated by the remote peer"""
class PeerBadKey(PeerError):
    """Raised if the peer didn't know the key we used"""
class PeerBadSignature(PeerError):
    """Raised if the peer didn't like the signature we sent"""
class PeerBadTime(PeerError):
    """Raised if the peer didn't like the time we sent"""
class PeerBadTruncation(PeerError):
    """Raised if the peer didn't like amount of truncation in the TSIG we sent"""
# Default TSIG signing algorithm identifier.
default_algorithm = "HMAC-MD5.SIG-ALG.REG.INT"

# TSIG error-field codes; each has a matching Peer* exception class above.
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22
def sign(wire, keyname, secret, time, fudge, original_id, error,
         other_data, request_mac, ctx=None, multi=False, first=True,
         algorithm=default_algorithm):
    """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
    for the input parameters, the HMAC MAC calculated by applying the
    TSIG signature algorithm, and the TSIG digest context.
    @rtype: (string, string, hmac.HMAC object)
    @raises ValueError: I{other_data} is too long
    @raises NotImplementedError: I{algorithm} is not supported
    """
    (algorithm_name, digestmod) = get_algorithm(algorithm)
    if first:
        # start a fresh HMAC; a prior MAC (e.g. the request's) is folded in
        # first, prefixed by its 16-bit length
        ctx = hmac.new(secret, digestmod=digestmod)
        ml = len(request_mac)
        if ml > 0:
            ctx.update(struct.pack('!H', ml))
            ctx.update(request_mac)
    # hash the original message ID followed by the rest of the wire message
    # (wire[2:] skips the ID bytes, which were just hashed separately)
    id = struct.pack('!H', original_id)
    ctx.update(id)
    ctx.update(wire[2:])
    if first:
        # the first message of an exchange also covers the key name,
        # class ANY, and a zero TTL
        ctx.update(keyname.to_digestable())
        ctx.update(struct.pack('!H', dns.rdataclass.ANY))
        ctx.update(struct.pack('!I', 0))
    # 48-bit signing time split into 16 high / 32 low bits, plus fudge
    long_time = time + 0L
    upper_time = (long_time >> 32) & 0xffffL
    lower_time = long_time & 0xffffffffL
    time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
    pre_mac = algorithm_name + time_mac
    ol = len(other_data)
    if ol > 65535:
        # the rdata length field for other_data is only 16 bits wide
        raise ValueError('TSIG Other Data is > 65535 bytes')
    post_mac = struct.pack('!HH', error, ol) + other_data
    if first:
        ctx.update(pre_mac)
        ctx.update(post_mac)
    else:
        # continuation messages hash only the time fields
        ctx.update(time_mac)
    mac = ctx.digest()
    mpack = struct.pack('!H', len(mac))
    tsig_rdata = pre_mac + mpack + mac + id + post_mac
    if multi:
        # seed the context for the next message in the exchange with this
        # MAC.  NOTE(review): this hmac.new() omits the digestmod used
        # above -- presumably intentional for HMAC-MD5 only; verify.
        ctx = hmac.new(secret)
        ml = len(mac)
        ctx.update(struct.pack('!H', ml))
        ctx.update(mac)
    else:
        ctx = None
    return (tsig_rdata, mac, ctx)
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
             other_data, request_mac, ctx=None, multi=False, first=True,
             algorithm=default_algorithm):
    """Backwards-compatible alias for sign().

    Despite the name, it signs with whichever algorithm is passed
    (HMAC-MD5 by default)."""
    return sign(wire, keyname, secret, time, fudge, original_id, error,
                other_data, request_mac, ctx=ctx, multi=multi, first=first,
                algorithm=algorithm)
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
             tsig_rdlen, ctx=None, multi=False, first=True):
    """Validate the specified TSIG rdata against the other input parameters.

    @raises FormError: The TSIG is badly formed.
    @raises BadTime: There is too much time skew between the client and the
    server.
    @raises BadSignature: The TSIG signature did not validate
    @rtype: hmac.HMAC object"""
    (adcount,) = struct.unpack("!H", wire[10:12])
    if adcount == 0:
        # A TSIG record must live in the additional section.
        raise dns.exception.FormError
    adcount -= 1
    # Rebuild the wire as it looked before the TSIG record was appended:
    # decrement ARCOUNT and drop everything from the TSIG record onwards.
    new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
    current = tsig_rdata
    # Algorithm name, as a DNS name, leads the rdata.
    (aname, used) = dns.name.from_wire(wire, current)
    current = current + used
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    # Signing time is a 48-bit value split into 16 upper + 32 lower bits.
    time = ((upper_time + 0L) << 32) + (lower_time + 0L)
    current += 10
    mac = wire[current:current + mac_size]
    current += mac_size
    (original_id, error, other_size) = \
        struct.unpack("!HHH", wire[current:current + 6])
    current += 6
    other_data = wire[current:current + other_size]
    current += other_size
    if current != tsig_rdata + tsig_rdlen:
        # Parsed length must exactly match the declared rdata length.
        raise dns.exception.FormError
    if error != 0:
        # The peer reported a TSIG error code; raise the matching exception.
        if error == BADSIG:
            raise PeerBadSignature
        elif error == BADKEY:
            raise PeerBadKey
        elif error == BADTIME:
            raise PeerBadTime
        elif error == BADTRUNC:
            raise PeerBadTruncation
        else:
            raise PeerError('unknown TSIG error code %d' % error)
    # Reject messages signed outside the fudge window around the signing time.
    time_low = time - fudge
    time_high = time + fudge
    if now < time_low or now > time_high:
        raise BadTime
    # Recompute the MAC over the reconstructed wire with the algorithm named
    # in the record (aname) and compare against the received MAC.
    (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
                                original_id, error, other_data,
                                request_mac, ctx, multi, first, aname)
    if (our_mac != mac):
        raise BadSignature
    return ctx
def get_algorithm(algorithm):
    """Returns the wire format string and the hash module to use for the
    specified TSIG algorithm

    @rtype: (string, hash constructor)
    @raises NotImplementedError: I{algorithm} is not supported
    """
    # Build the name -> digest constructor table lazily; the available
    # digests depend on the Python version (this is Python 2 era code).
    hashes = {}
    try:
        import hashlib
        hashes[dns.name.from_text('hmac-sha224')] = hashlib.sha224
        hashes[dns.name.from_text('hmac-sha256')] = hashlib.sha256
        hashes[dns.name.from_text('hmac-sha384')] = hashlib.sha384
        hashes[dns.name.from_text('hmac-sha512')] = hashlib.sha512
        hashes[dns.name.from_text('hmac-sha1')] = hashlib.sha1
        hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = hashlib.md5

        import sys
        if sys.hexversion < 0x02050000:
            # hashlib doesn't conform to PEP 247: API for
            # Cryptographic Hash Functions, which hmac before python
            # 2.5 requires, so add the necessary items.
            class HashlibWrapper:
                def __init__(self, basehash):
                    self.basehash = basehash
                    self.digest_size = self.basehash().digest_size

                def new(self, *args, **kwargs):
                    return self.basehash(*args, **kwargs)

            for name in hashes:
                hashes[name] = HashlibWrapper(hashes[name])
    except ImportError:
        # Pre-hashlib Pythons: fall back to the legacy md5/sha modules,
        # which only cover MD5 and SHA-1.
        import md5, sha
        hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = md5.md5
        hashes[dns.name.from_text('hmac-sha1')] = sha.sha

    # Accept either a text algorithm name or a dns.name.Name.
    if isinstance(algorithm, (str, unicode)):
        algorithm = dns.name.from_text(algorithm)

    if algorithm in hashes:
        return (algorithm.to_digestable(), hashes[algorithm])

    raise NotImplementedError("TSIG algorithm " + str(algorithm) +
                              " is not supported")
| apache-2.0 |
AltSchool/django | django/contrib/gis/utils/srs.py | 45 | 3041 | from django.contrib.gis.gdal import SpatialReference
from django.db import DEFAULT_DB_ALIAS, connections
def add_srs_entry(srs, auth_name='EPSG', auth_srid=None, ref_sys_name=None,
                  database=None):
    """
    Insert the information of a GDAL SpatialReference into the backend's
    `spatial_ref_sys` table, enabling database-level spatial transformations
    for reference systems the backend does not ship with:

    >>> from django.contrib.gis.utils import add_srs_entry
    >>> add_srs_entry(3857)

    `srs` may be a SpatialReference instance or anything its constructor
    accepts (such as an EPSG integer).

    Keyword Arguments:
     auth_name:
       Value for the `auth_name` column. Defaults to 'EPSG'.
     auth_srid:
       Value for the `auth_srid` column. Defaults to the SRID determined
       by GDAL.
     ref_sys_name:
       For SpatiaLite users only, value for the `ref_sys_name` column.
       Defaults to the name determined by GDAL.
     database:
       Name of the database connection to use; defaults to
       `django.db.DEFAULT_DB_ALIAS`.
    """
    alias = database or DEFAULT_DB_ALIAS
    connection = connections[alias]

    # Refuse to run against non-spatial or unsupported backends.
    if not hasattr(connection.ops, 'spatial_version'):
        raise Exception('The `add_srs_entry` utility only works '
                        'with spatial backends.')
    if not connection.features.supports_add_srs_entry:
        raise Exception('This utility does not support your database backend.')
    SpatialRefSys = connection.ops.spatial_ref_sys()

    # Coerce non-SpatialReference input (e.g. a bare SRID) into an instance.
    if not isinstance(srs, SpatialReference):
        srs = SpatialReference(srs)

    if srs.srid is None:
        raise Exception('Spatial reference requires an SRID to be '
                        'compatible with the spatial backend.')

    # Column values common to both PostGIS and SpatiaLite.
    kwargs = {
        'srid': srs.srid,
        'auth_name': auth_name,
        'auth_srid': auth_srid or srs.srid,
        'proj4text': srs.proj4,
    }

    # Add whichever backend-specific columns the model actually defines.
    model_fields = {f.name for f in SpatialRefSys._meta.get_fields()}
    if 'srtext' in model_fields:
        kwargs['srtext'] = srs.wkt
    if 'ref_sys_name' in model_fields:
        # Spatialite specific
        kwargs['ref_sys_name'] = ref_sys_name or srs.name

    # Look up by SRID only: an existing row may legitimately differ from
    # our wkt/proj details, and should not be duplicated.
    try:
        SpatialRefSys.objects.using(alias).get(srid=srs.srid)
    except SpatialRefSys.DoesNotExist:
        SpatialRefSys.objects.using(alias).create(**kwargs)
| bsd-3-clause |
kaushik94/sympy | sympy/diffgeom/tests/test_hyperbolic_space.py | 22 | 2583 | r'''
unit test describing the hyperbolic half-plane with the Poincare metric. This
is a basic model of hyperbolic geometry on the (positive) half-space
{(x,y) \in R^2 | y > 0}
with the Riemannian metric
ds^2 = (dx^2 + dy^2)/y^2
It has constant negative scalar curvature = -2
https://en.wikipedia.org/wiki/Poincare_half-plane_model
'''
from sympy import diag
from sympy.diffgeom import (twoform_to_matrix,
metric_to_Christoffel_1st, metric_to_Christoffel_2nd,
metric_to_Riemann_components, metric_to_Ricci_components)
import sympy.diffgeom.rn
from sympy.tensor.array import ImmutableDenseNDimArray
def test_H2():
    """Check curvature quantities of the Poincare half-plane metric
    ds^2 = (dx^2 + dy^2)/y^2 on R^2 (scalar curvature -2)."""
    TP = sympy.diffgeom.TensorProduct
    R2 = sympy.diffgeom.rn.R2
    y = R2.y
    dy = R2.dy
    dx = R2.dx
    g = (TP(dx, dx) + TP(dy, dy))*y**(-2)
    automat = twoform_to_matrix(g)
    mat = diag(y**(-2), y**(-2))
    assert mat == automat

    gamma1 = metric_to_Christoffel_1st(g)
    assert gamma1[0, 0, 0] == 0
    assert gamma1[0, 0, 1] == -y**(-3)
    assert gamma1[0, 1, 0] == -y**(-3)
    assert gamma1[0, 1, 1] == 0
    assert gamma1[1, 1, 1] == -y**(-3)
    assert gamma1[1, 1, 0] == 0
    assert gamma1[1, 0, 1] == 0
    assert gamma1[1, 0, 0] == y**(-3)

    gamma2 = metric_to_Christoffel_2nd(g)
    assert gamma2[0, 0, 0] == 0
    assert gamma2[0, 0, 1] == -y**(-1)
    assert gamma2[0, 1, 0] == -y**(-1)
    assert gamma2[0, 1, 1] == 0
    assert gamma2[1, 1, 1] == -y**(-1)
    assert gamma2[1, 1, 0] == 0
    assert gamma2[1, 0, 1] == 0
    assert gamma2[1, 0, 0] == y**(-1)

    Rm = metric_to_Riemann_components(g)
    assert Rm[0, 0, 0, 0] == 0
    assert Rm[0, 0, 0, 1] == 0
    assert Rm[0, 0, 1, 0] == 0
    assert Rm[0, 0, 1, 1] == 0
    assert Rm[0, 1, 0, 0] == 0
    assert Rm[0, 1, 0, 1] == -y**(-2)
    assert Rm[0, 1, 1, 0] == y**(-2)
    assert Rm[0, 1, 1, 1] == 0
    assert Rm[1, 0, 0, 0] == 0
    assert Rm[1, 0, 0, 1] == y**(-2)
    assert Rm[1, 0, 1, 0] == -y**(-2)
    assert Rm[1, 0, 1, 1] == 0
    assert Rm[1, 1, 0, 0] == 0
    assert Rm[1, 1, 0, 1] == 0
    assert Rm[1, 1, 1, 0] == 0
    assert Rm[1, 1, 1, 1] == 0

    Ric = metric_to_Ricci_components(g)
    assert Ric[0, 0] == -y**(-2)
    assert Ric[0, 1] == 0
    assert Ric[1, 0] == 0
    # Fixed: this line previously re-checked Ric[0, 0]; the intent
    # (confirmed by the full-array assertion below) is the [1, 1] component.
    assert Ric[1, 1] == -y**(-2)

    assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2))

    ## scalar curvature is -2
    #TODO - it would be nice to have index contraction built-in
    R = (Ric[0, 0] + Ric[1, 1])*y**2
    assert R == -2

    ## Gauss curvature is -1
    assert R/2 == -1
| bsd-3-clause |
Lancey6/redwind | migrations/20141130-eliminate-duplicate-tags.py | 3 | 2273 | """
"""
import os
import json
from sqlalchemy import (create_engine, Table, Column, String, Integer,
Float, Text, MetaData, select, ForeignKey,
bindparam, delete, and_)
from config import Configuration
engine = create_engine(Configuration.SQLALCHEMY_DATABASE_URI, echo=True)
metadata = MetaData()

# Minimal table definitions: only the columns this migration touches.
tags = Table(
    'tag', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String),
)

posts = Table(
    'post', metadata,
    Column('id', Integer, primary_key=True),
)

# Association table linking posts to tags (many-to-many).
posts_to_tags = Table(
    'posts_to_tags', metadata,
    Column('tag_id', Integer, ForeignKey('tag.id')),
    Column('post_id', Integer, ForeignKey('post.id')),
)
def eliminate_duplicates(conn):
    """Collapse tags that share a name onto the first (lowest-id) tag row,
    repointing post associations and deleting the duplicate tag rows."""
    # Maps a tag name to the first tag id seen with that name; iteration is
    # ordered by tag id, so the canonical tag is the lowest-id one.
    tag_map = {}
    update_batch = []
    delete_batch = []

    for row in conn.execute(
            select([posts, tags]).select_from(
                posts.join(posts_to_tags).join(tags)
            ).order_by(tags.c.id)):
        post_id = row[0]
        tag_id = row[1]
        tag_name = row[2]
        # possible duplicate
        if tag_name in tag_map:
            preexisting_tag_id = tag_map.get(tag_name)
            if preexisting_tag_id != tag_id:
                # Repoint this association at the canonical tag and schedule
                # the duplicate tag row for deletion.
                update_batch.append({
                    'the_post_id': post_id,
                    'old_tag_id': tag_id,
                    'new_tag_id': preexisting_tag_id,
                })
                delete_batch.append({
                    'the_tag_id': tag_id,
                })
        else:
            tag_map[tag_name] = tag_id

    print('update batch', update_batch)
    if update_batch:
        # Executemany: bindparam names match the keys of the batch dicts.
        update_stmt = posts_to_tags.update().where(
            and_(
                posts_to_tags.c.post_id == bindparam('the_post_id'),
                posts_to_tags.c.tag_id == bindparam('old_tag_id')
            )
        ).values(tag_id=bindparam('new_tag_id'))
        # print(update_stmt)
        # print(update_batch)
        conn.execute(update_stmt, update_batch)

    print('delete batch', delete_batch)
    if delete_batch:
        # Deletes must run after the updates above so no association still
        # references a removed tag.
        delete_stmt = tags.delete().where(tags.c.id == bindparam('the_tag_id'))
        # print(delete_stmt)
        conn.execute(delete_stmt, delete_batch)
# Run the whole migration inside a single transaction.
with engine.begin() as conn:
    eliminate_duplicates(conn)
| bsd-2-clause |
mega-force/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x078.py | 252 | 4648 | data = (
'Dang ', # 0x00
'Ma ', # 0x01
'Sha ', # 0x02
'Dan ', # 0x03
'Jue ', # 0x04
'Li ', # 0x05
'Fu ', # 0x06
'Min ', # 0x07
'Nuo ', # 0x08
'Huo ', # 0x09
'Kang ', # 0x0a
'Zhi ', # 0x0b
'Qi ', # 0x0c
'Kan ', # 0x0d
'Jie ', # 0x0e
'Fen ', # 0x0f
'E ', # 0x10
'Ya ', # 0x11
'Pi ', # 0x12
'Zhe ', # 0x13
'Yan ', # 0x14
'Sui ', # 0x15
'Zhuan ', # 0x16
'Che ', # 0x17
'Dun ', # 0x18
'Pan ', # 0x19
'Yan ', # 0x1a
'[?] ', # 0x1b
'Feng ', # 0x1c
'Fa ', # 0x1d
'Mo ', # 0x1e
'Zha ', # 0x1f
'Qu ', # 0x20
'Yu ', # 0x21
'Luo ', # 0x22
'Tuo ', # 0x23
'Tuo ', # 0x24
'Di ', # 0x25
'Zhai ', # 0x26
'Zhen ', # 0x27
'Ai ', # 0x28
'Fei ', # 0x29
'Mu ', # 0x2a
'Zhu ', # 0x2b
'Li ', # 0x2c
'Bian ', # 0x2d
'Nu ', # 0x2e
'Ping ', # 0x2f
'Peng ', # 0x30
'Ling ', # 0x31
'Pao ', # 0x32
'Le ', # 0x33
'Po ', # 0x34
'Bo ', # 0x35
'Po ', # 0x36
'Shen ', # 0x37
'Za ', # 0x38
'Nuo ', # 0x39
'Li ', # 0x3a
'Long ', # 0x3b
'Tong ', # 0x3c
'[?] ', # 0x3d
'Li ', # 0x3e
'Aragane ', # 0x3f
'Chu ', # 0x40
'Keng ', # 0x41
'Quan ', # 0x42
'Zhu ', # 0x43
'Kuang ', # 0x44
'Huo ', # 0x45
'E ', # 0x46
'Nao ', # 0x47
'Jia ', # 0x48
'Lu ', # 0x49
'Wei ', # 0x4a
'Ai ', # 0x4b
'Luo ', # 0x4c
'Ken ', # 0x4d
'Xing ', # 0x4e
'Yan ', # 0x4f
'Tong ', # 0x50
'Peng ', # 0x51
'Xi ', # 0x52
'[?] ', # 0x53
'Hong ', # 0x54
'Shuo ', # 0x55
'Xia ', # 0x56
'Qiao ', # 0x57
'[?] ', # 0x58
'Wei ', # 0x59
'Qiao ', # 0x5a
'[?] ', # 0x5b
'Keng ', # 0x5c
'Xiao ', # 0x5d
'Que ', # 0x5e
'Chan ', # 0x5f
'Lang ', # 0x60
'Hong ', # 0x61
'Yu ', # 0x62
'Xiao ', # 0x63
'Xia ', # 0x64
'Mang ', # 0x65
'Long ', # 0x66
'Iong ', # 0x67
'Che ', # 0x68
'Che ', # 0x69
'E ', # 0x6a
'Liu ', # 0x6b
'Ying ', # 0x6c
'Mang ', # 0x6d
'Que ', # 0x6e
'Yan ', # 0x6f
'Sha ', # 0x70
'Kun ', # 0x71
'Yu ', # 0x72
'[?] ', # 0x73
'Kaki ', # 0x74
'Lu ', # 0x75
'Chen ', # 0x76
'Jian ', # 0x77
'Nue ', # 0x78
'Song ', # 0x79
'Zhuo ', # 0x7a
'Keng ', # 0x7b
'Peng ', # 0x7c
'Yan ', # 0x7d
'Zhui ', # 0x7e
'Kong ', # 0x7f
'Ceng ', # 0x80
'Qi ', # 0x81
'Zong ', # 0x82
'Qing ', # 0x83
'Lin ', # 0x84
'Jun ', # 0x85
'Bo ', # 0x86
'Ding ', # 0x87
'Min ', # 0x88
'Diao ', # 0x89
'Jian ', # 0x8a
'He ', # 0x8b
'Lu ', # 0x8c
'Ai ', # 0x8d
'Sui ', # 0x8e
'Que ', # 0x8f
'Ling ', # 0x90
'Bei ', # 0x91
'Yin ', # 0x92
'Dui ', # 0x93
'Wu ', # 0x94
'Qi ', # 0x95
'Lun ', # 0x96
'Wan ', # 0x97
'Dian ', # 0x98
'Gang ', # 0x99
'Pei ', # 0x9a
'Qi ', # 0x9b
'Chen ', # 0x9c
'Ruan ', # 0x9d
'Yan ', # 0x9e
'Die ', # 0x9f
'Ding ', # 0xa0
'Du ', # 0xa1
'Tuo ', # 0xa2
'Jie ', # 0xa3
'Ying ', # 0xa4
'Bian ', # 0xa5
'Ke ', # 0xa6
'Bi ', # 0xa7
'Wei ', # 0xa8
'Shuo ', # 0xa9
'Zhen ', # 0xaa
'Duan ', # 0xab
'Xia ', # 0xac
'Dang ', # 0xad
'Ti ', # 0xae
'Nao ', # 0xaf
'Peng ', # 0xb0
'Jian ', # 0xb1
'Di ', # 0xb2
'Tan ', # 0xb3
'Cha ', # 0xb4
'Seki ', # 0xb5
'Qi ', # 0xb6
'[?] ', # 0xb7
'Feng ', # 0xb8
'Xuan ', # 0xb9
'Que ', # 0xba
'Que ', # 0xbb
'Ma ', # 0xbc
'Gong ', # 0xbd
'Nian ', # 0xbe
'Su ', # 0xbf
'E ', # 0xc0
'Ci ', # 0xc1
'Liu ', # 0xc2
'Si ', # 0xc3
'Tang ', # 0xc4
'Bang ', # 0xc5
'Hua ', # 0xc6
'Pi ', # 0xc7
'Wei ', # 0xc8
'Sang ', # 0xc9
'Lei ', # 0xca
'Cuo ', # 0xcb
'Zhen ', # 0xcc
'Xia ', # 0xcd
'Qi ', # 0xce
'Lian ', # 0xcf
'Pan ', # 0xd0
'Wei ', # 0xd1
'Yun ', # 0xd2
'Dui ', # 0xd3
'Zhe ', # 0xd4
'Ke ', # 0xd5
'La ', # 0xd6
'[?] ', # 0xd7
'Qing ', # 0xd8
'Gun ', # 0xd9
'Zhuan ', # 0xda
'Chan ', # 0xdb
'Qi ', # 0xdc
'Ao ', # 0xdd
'Peng ', # 0xde
'Lu ', # 0xdf
'Lu ', # 0xe0
'Kan ', # 0xe1
'Qiang ', # 0xe2
'Chen ', # 0xe3
'Yin ', # 0xe4
'Lei ', # 0xe5
'Biao ', # 0xe6
'Qi ', # 0xe7
'Mo ', # 0xe8
'Qi ', # 0xe9
'Cui ', # 0xea
'Zong ', # 0xeb
'Qing ', # 0xec
'Chuo ', # 0xed
'[?] ', # 0xee
'Ji ', # 0xef
'Shan ', # 0xf0
'Lao ', # 0xf1
'Qu ', # 0xf2
'Zeng ', # 0xf3
'Deng ', # 0xf4
'Jian ', # 0xf5
'Xi ', # 0xf6
'Lin ', # 0xf7
'Ding ', # 0xf8
'Dian ', # 0xf9
'Huang ', # 0xfa
'Pan ', # 0xfb
'Za ', # 0xfc
'Qiao ', # 0xfd
'Di ', # 0xfe
'Li ', # 0xff
)
| gpl-2.0 |
voodka/ghostbakup | node_modules/testem/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the character for a code point, using a surrogate pair for
    astral (non-BMP) code points. Python 2 only: relies on `unichr`."""
    if codePoint >= 0x0000 and codePoint <= 0xFFFF:
        return unichr(codePoint)
    elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
        # Surrogate-pair formulae, see the links in the module header.
        highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
        lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
        return unichr(highSurrogate) + unichr(lowSurrogate)
    else:
        # Out-of-range input; callers only pass valid code points.
        return 'Error'
def hexify(codePoint):
    """Format a code point as a 'U+XXXXXX' string, zero-padded to six
    hex digits."""
    digits = hex(codePoint)[2:].upper()
    return 'U+' + digits.zfill(6)
def writeFile(filename, contents):
    """Write `contents` (trailing-whitespace-stripped, newline-terminated)
    to `filename`, logging the name. Python 2 `print` statement."""
    print filename
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')
# Build one record per Unicode scalar value: the code point, its decoded
# string form, and its UTF-8 byte sequence viewed as Latin-1 characters.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    # Skip non-scalar values.
    if codePoint >= 0xD800 and codePoint <= 0xDFFF:
        continue
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| mit |
sbc100/native_client | src/trusted/validator_ragel/PRESUBMIT.py | 12 | 4775 | # Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Documentation on PRESUBMIT.py can be found at:
# http://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
import json
import hashlib
import os
import re
import gclient_utils
def CheckChange(input_api, message_constructor):
    """Checks for files with a modified contents.

    Some checking of validator happens on builbots, but comprehensive
    enumeration tests must be run locally.

    There are two dangers:
      1. Source code for autogenerated files can be modified without
         regeneration of said files.
      2. Source of validator can be changed without running the
         aforementioned tests.

    This function catches the situation when source files for
    validator_x86_??.c are changed but files are not regenerated and it also
    catches the situation when code is changed without running the
    dfacheckvalidator tests.
    """
    errors = []

    changelist = input_api.change
    root_path = changelist.RepositoryRoot()

    if input_api.change.scm == 'svn':
        try:
            # With SVN you can decide to commit not all modified files but
            # some of them thus separate GetAllModifiedFiles() and
            # GetModifiedFiles() lists are provided.  We need to remove
            # root_path from the name of file.
            assert all(filename.startswith(root_path + os.path.sep)
                       for filename in changelist.GetAllModifiedFiles())
            all_filenames = [filename[len(root_path + os.path.sep):]
                             for filename in changelist.GetAllModifiedFiles()]
            assert all(filename.startswith(root_path + os.path.sep)
                       for filename in changelist.GetModifiedFiles())
            modified_filenames = [filename[len(root_path + os.path.sep):]
                                  for filename in changelist.GetModifiedFiles()]
        except:
            # If gcl is not available (which happens in CQ bots) then we'll
            # try to use AffectedFiles() instead of GetAllModifiedFiles()
            all_filenames = [file.LocalPath()
                             for file in changelist.AffectedFiles()]
            modified_filenames = all_filenames
    else:
        # With GIT you must commit all modified files thus only
        # AffectedFiles() list is provided.
        all_filenames = [file.LocalPath()
                         for file in changelist.AffectedFiles()]
        modified_filenames = all_filenames

    # protected_files.json records SHA-512 hashes of the validator sources
    # and the generating/generated file relationships checked below.
    json_filename = os.path.join(
        'src', 'trusted', 'validator_ragel', 'gen', 'protected_files.json')
    protected_files = json.loads(
        gclient_utils.FileRead(os.path.join(root_path, json_filename)))

    need_dfagen = False
    need_dfacheckvalidator = False

    canonical_prefix = 'native_client/'

    for filename in sorted(all_filenames):
        canonical_filename = canonical_prefix + filename.replace('\\', '/')

        # Danger 2: validator source changed without re-running the tests.
        if canonical_filename in protected_files['validator']:
            file_contents = gclient_utils.FileRead(
                os.path.join(root_path, filename))
            sha512 = hashlib.sha512(file_contents).hexdigest()
            if sha512 != protected_files['validator'][canonical_filename]:
                errors.append(message_constructor(
                    'Incorrect {0} hash:\n expected {1}\n got {2}'.format(
                        canonical_filename,
                        protected_files['validator'][canonical_filename],
                        sha512)))
                need_dfacheckvalidator = True

        # Danger 1: a generating source is newer than its generated output.
        if canonical_filename in protected_files['generating']:
            for automaton_filename in protected_files['generated']:
                if (os.stat(os.path.join(root_path, filename)).st_mtime >
                        os.stat(os.path.join(root_path,
                            automaton_filename[len(canonical_prefix):])).st_mtime):
                    errors.append(message_constructor(
                        'File {0} is older then {1}'.format(
                            automaton_filename, canonical_filename)))
                    need_dfagen = True

        # Protected files must be part of the CL that touches them.
        if (canonical_filename in protected_files['validator'] or
                canonical_filename in protected_files['generating'] or
                filename == json_filename):
            if filename not in modified_filenames:
                errors.append(message_constructor(
                    'File {0} is changed but is excluded from this CL'.format(
                        canonical_filename)))

    if need_dfagen:
        errors.append(message_constructor(
            'Please run "./scons dfagen" before commit!'))

    if need_dfacheckvalidator:
        errors.append(message_constructor(
            'Please run "./scons dfacheckvalidator" before commit!'))

    return errors
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point for uploads: report problems as prompt warnings."""
    return CheckChange(
        input_api, message_constructor=output_api.PresubmitPromptWarning)
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit entry point for commits: report problems as hard errors."""
    return CheckChange(
        input_api, message_constructor=output_api.PresubmitError)
| bsd-3-clause |
tudorbarascu/QGIS | tests/src/python/test_qgsscalewidget.py | 15 | 2940 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsScaleWidget
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '13/03/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
import qgis # NOQA
import math
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtTest import QSignalSpy
from qgis.gui import QgsScaleWidget
from qgis.testing import start_app, unittest
start_app()
class TestQgsScaleWidget(unittest.TestCase):

    def testBasic(self):
        """Scale strings and decimal ratios round-trip through the widget
        and emit scaleChanged with the denominator."""
        w = QgsScaleWidget()
        spy = QSignalSpy(w.scaleChanged)
        w.setScaleString('1:2345')
        self.assertEqual(w.scaleString(), '1:2,345')
        self.assertEqual(w.scale(), 2345)
        self.assertEqual(len(spy), 1)
        self.assertEqual(spy[-1][0], 2345)

        # A bare decimal is interpreted as the map/ground ratio (0.02 == 1:50).
        w.setScaleString('0.02')
        self.assertEqual(w.scaleString(), '1:50')
        self.assertEqual(w.scale(), 50)
        self.assertEqual(len(spy), 2)
        self.assertEqual(spy[-1][0], 50)

        # Thousands separators are accepted on input.
        w.setScaleString('1:4,000')
        self.assertEqual(w.scaleString(), '1:4,000')
        self.assertEqual(w.scale(), 4000)
        self.assertEqual(len(spy), 3)
        self.assertEqual(spy[-1][0], 4000)

    def testNull(self):
        """Null (NaN) scale handling, gated by the allowNull flag."""
        w = QgsScaleWidget()

        w.setScale(50)
        self.assertFalse(w.allowNull())
        w.setNull()  # no effect
        self.assertEqual(w.scale(), 50.0)
        self.assertFalse(w.isNull())

        spy = QSignalSpy(w.scaleChanged)
        w.setAllowNull(True)
        self.assertTrue(w.allowNull())

        # Empty and whitespace-only strings become the null (NaN) scale.
        w.setScaleString('')
        self.assertEqual(len(spy), 1)
        self.assertTrue(math.isnan(w.scale()))
        self.assertTrue(math.isnan(spy[-1][0]))
        self.assertTrue(w.isNull())
        w.setScaleString(" ")
        self.assertTrue(math.isnan(w.scale()))
        self.assertTrue(w.isNull())

        w.setScaleString('0.02')
        self.assertEqual(w.scale(), 50.0)
        self.assertEqual(len(spy), 2)
        self.assertEqual(spy[-1][0], 50.0)
        self.assertFalse(w.isNull())

        w.setScaleString('')
        self.assertTrue(math.isnan(w.scale()))
        self.assertEqual(len(spy), 3)
        self.assertTrue(math.isnan(spy[-1][0]))
        self.assertTrue(w.isNull())

        w.setScaleString('0.02')
        self.assertEqual(w.scale(), 50.0)
        self.assertEqual(len(spy), 4)
        self.assertEqual(spy[-1][0], 50.0)
        self.assertFalse(w.isNull())

        # Explicit setNull() works once allowNull is enabled.
        w.setNull()
        self.assertTrue(math.isnan(w.scale()))
        self.assertEqual(len(spy), 5)
        self.assertTrue(math.isnan(spy[-1][0]))
        self.assertTrue(w.isNull())

        w.setAllowNull(False)
        self.assertFalse(w.allowNull())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
aferr/TemporalPartitioningMemCtl | tests/configs/o3-timing.py | 14 | 2408 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
# Cache parameters shared by every level of the hierarchy.
class MyCache(BaseCache):
    assoc = 2
    block_size = 64
    latency = '1ns'
    mshrs = 10
    tgts_per_mshr = 5

# L1 variant: marked top-level and allowed more targets per MSHR.
class MyL1Cache(MyCache):
    is_top_level = True
    tgts_per_mshr = 20

# Single O3 CPU with split L1 I/D caches and a unified 2MB L2.
cpu = DerivO3CPU(cpu_id=0)
cpu.addTwoLevelCacheHierarchy(MyL1Cache(size = '128kB'),
                              MyL1Cache(size = '256kB'),
                              MyCache(size = '2MB'))
cpu.clock = '2GHz'

system = System(cpu = cpu,
                physmem = SimpleMemory(),
                membus = CoherentBus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master

# create the interrupt controller
cpu.createInterruptController()
cpu.connectAllPorts(system.membus)

root = Root(full_system = False, system = system)
| bsd-3-clause |
jirikuncar/invenio | invenio/testsuite/test_ext_email.py | 16 | 11033 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Test unit for the miscutil/mailutils module.
"""
import os
import sys
import pkg_resources
from base64 import encodestring
from six import iteritems, StringIO
from flask import current_app
from invenio.ext.email import send_email
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
class MailTestCase(InvenioTestCase):
    # Route all outgoing mail through the console backend, which writes
    # messages to stdout where the tests can inspect them.
    EMAIL_BACKEND = 'flask_email.backends.console.Mail'

    def setUp(self):
        super(MailTestCase, self).setUp()
        current_app.config['EMAIL_BACKEND'] = self.EMAIL_BACKEND
        # Capture stdout so sent messages can be read back via getvalue().
        self.__stdout = sys.stdout
        self.stream = sys.stdout = StringIO()

    def tearDown(self):
        # Restore the real stdout captured in setUp().
        del self.stream
        sys.stdout = self.__stdout
        del self.__stdout
        super(MailTestCase, self).tearDown()

    def flush_mailbox(self):
        # Discard everything captured so far.
        self.stream = sys.stdout = StringIO()

    #def get_mailbox_content(self):
    #    messages = self.stream.getvalue().split('\n' + ('-' * 79) + '\n')
    #    return [message_from_string(m) for m in messages if m]
class TestMailUtils(MailTestCase):
    """
    mailutils TestSuite.
    """

    def test_console_send_email(self):
        """
        Test that the console backend can be pointed at an arbitrary stream.
        """
        msg_content = """Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: Subject
From: [email protected]
To: [email protected]"""
        # Recipients may be given as a list or as a bare string.
        send_email('[email protected]', ['[email protected]'], subject='Subject',
                   content='Content')
        self.assertIn(msg_content, sys.stdout.getvalue())
        self.flush_mailbox()
        send_email('[email protected]', '[email protected]', subject='Subject',
                   content='Content')
        self.assertIn(msg_content, sys.stdout.getvalue())
        self.flush_mailbox()

    def test_email_text_template(self):
        """
        Test email text template engine.
        """
        from invenio.ext.template import render_template_to_string

        # Each context exercises a different combination of optional
        # header/footer template variables.
        contexts = {
            'ctx1': {'content': 'Content 1'},
            'ctx2': {'content': 'Content 2', 'header': 'Header 2'},
            'ctx3': {'content': 'Content 3', 'footer': 'Footer 3'},
            'ctx4': {'content': 'Content 4', 'header': 'Header 4', 'footer': 'Footer 4'}
        }

        msg_content = """Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: %s
From: [email protected]
To: [email protected]"""

        for name, ctx in iteritems(contexts):
            # The sent message body must match what the template renders.
            msg = render_template_to_string('mail_text.tpl', **ctx)
            send_email('[email protected]', ['[email protected]'], subject=name,
                       **ctx)
            email = sys.stdout.getvalue()
            self.assertIn(msg_content % name, email)
            self.assertIn(msg, email)
            self.flush_mailbox()

    def test_email_html_template(self):
        """
        Test email html template engine.
        """
        from invenio.ext.template import render_template_to_string

        contexts = {
            'ctx1': {'html_content': '<b>Content 1</b>'},
            'ctx2': {'html_content': '<b>Content 2</b>',
                     'html_header': '<h1>Header 2</h1>'},
            'ctx3': {'html_content': '<b>Content 3</b>',
                     'html_footer': '<i>Footer 3</i>'},
            'ctx4': {'html_content': '<b>Content 4</b>',
                     'html_header': '<h1>Header 4</h1>',
                     'html_footer': '<i>Footer 4</i>'}
        }

        def strip_html_key(ctx):
            # Drop the 'html_' prefix from every key (Python 2
            # tuple-unpacking lambda, kept for this Python 2 codebase).
            return dict(map(lambda (k, v): (k[5:], v), iteritems(ctx)))

        for name, ctx in iteritems(contexts):
            msg = render_template_to_string('mail_html.tpl',
                                            **strip_html_key(ctx))
            send_email('[email protected]', ['[email protected]'], subject=name,
                       content='Content Text', **ctx)
            email = sys.stdout.getvalue()
            # HTML + text bodies are sent as a multipart/alternative message.
            self.assertIn('Content-Type: multipart/alternative;', email)
            self.assertIn('Content Text', email)
            self.assertIn(msg, email)
            self.flush_mailbox()

    def test_email_html_image(self):
        """
        Test sending html message with an image.
        """
        html_images = {
            'img1': pkg_resources.resource_filename(
                'invenio.base',
                os.path.join('static', 'img', 'journal_water_dog.gif')
            )
        }
        send_email('[email protected]', ['[email protected]'],
                   subject='Subject', content='Content Text',
                   html_content='<img src="cid:img1"/>',
                   html_images=html_images)
        email = sys.stdout.getvalue()
        self.assertIn('Content Text', email)
        self.assertIn('<img src="cid:img1"/>', email)
        with open(html_images['img1'], 'r') as f:
            # Inline images are embedded base64-encoded.
            self.assertIn(encodestring(f.read()), email)
        self.flush_mailbox()

    def test_sending_attachment(self):
        """
        Test sending email with an attachment.
        """
        attachments = [
            pkg_resources.resource_filename(
                'invenio.base',
                os.path.join('static', 'img', 'journal_header.png')
            )
        ]
        send_email('[email protected]', ['[email protected]'],
                   subject='Subject', content='Content Text',
                   attachments=attachments)
        email = sys.stdout.getvalue()
        self.assertIn('Content Text', email)
        # First attachemnt is image/png
        self.assertIn('Content-Type: image/png', email)
        for attachment in attachments:
            with open(attachment, 'r') as f:
                # Attachments are embedded base64-encoded.
                self.assertIn(encodestring(f.read()), email)
        self.flush_mailbox()

    def test_single_recipient(self):
        """
        Test that the email receivers are hidden.
        """
        msg_content = """Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: Subject
From: [email protected]
To: [email protected]"""
        send_email('[email protected]', ['[email protected]'],
                   subject='Subject', content='Content')
        email = sys.stdout.getvalue()
        self.assertIn(msg_content, email)
        self.flush_mailbox()
        send_email('[email protected]', '[email protected]',
                   subject='Subject', content='Content')
        email = sys.stdout.getvalue()
        self.assertIn(msg_content, email)
        self.flush_mailbox()

    def test_bbc_undisclosed_recipients(self):
        """
        Test that the email receivers are hidden.
        """
        # With multiple recipients the To: header must not disclose them,
        # and no Bcc: header may leak into the message.
        msg_content = """Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: Subject
From: [email protected]
To: Undisclosed.Recipients:"""
        send_email('[email protected]', ['[email protected]', '[email protected]'],
                   subject='Subject', content='Content')
        email = sys.stdout.getvalue()
        self.assertIn(msg_content, email)
        self.assertNotIn('Bcc: [email protected],[email protected]', email)
        self.flush_mailbox()
        send_email('[email protected]', '[email protected], [email protected]',
                   subject='Subject', content='Content')
        email = sys.stdout.getvalue()
        self.assertIn(msg_content, email)
        self.assertNotIn('Bcc: [email protected],[email protected]', email)
        self.flush_mailbox()
class TestAdminMailBackend(MailTestCase):
    """Tests for the admin-only console backend: messages are written to the
    console addressed to the site admin instead of the listed recipients
    (see the ``To:`` assertions below)."""

    # Backend under test; its name suggests console output restricted to the
    # admin address.
    EMAIL_BACKEND = 'invenio.ext.email.backends.console_adminonly.Mail'
    # Informational banner the backend prepends listing the real recipients.
    ADMIN_MESSAGE = "This message would have been sent to the following recipients"

    def test_simple_email_header(self):
        """
        Test simple email header.
        """
        from invenio.config import CFG_SITE_ADMIN_EMAIL
        from invenio.ext.template import render_template_to_string
        # Expected header block: the message is addressed to the site admin,
        # not to the recipient passed to send_email() below.
        msg_content = """Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: Subject
From: [email protected]
To: %s""" % (CFG_SITE_ADMIN_EMAIL, )
        msg = render_template_to_string('mail_text.tpl', content='Content')
        self.flush_mailbox()
        # Recipient given as a one-element list.
        send_email('[email protected]', ['[email protected]'], subject='Subject',
                   content='Content')
        email = self.stream.getvalue()
        self.assertIn(msg_content, email)
        self.assertIn(self.ADMIN_MESSAGE, email)
        self.assertNotIn('Bcc:', email)
        self.assertIn(msg, email)
        self.flush_mailbox()
        # Recipient given as a bare string; same expectations apply.
        send_email('[email protected]', '[email protected]', subject='Subject',
                   content='Content')
        email = self.stream.getvalue()
        self.assertIn(msg_content, email)
        self.assertIn(self.ADMIN_MESSAGE, email)
        self.assertNotIn('Bcc:', email)
        self.assertIn(msg, email)
        self.flush_mailbox()

    def test_cc_bcc_headers(self):
        """
        Test that no Cc and Bcc headers are sent.
        """
        from invenio.config import CFG_SITE_ADMIN_EMAIL
        msg_content = """Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: Subject
From: [email protected]
To: %s""" % (CFG_SITE_ADMIN_EMAIL, )
        send_email('[email protected]', ['[email protected]', '[email protected]'],
                   subject='Subject', content='Content')
        email = self.stream.getvalue()
        self.assertIn(msg_content, email)
        self.assertIn(self.ADMIN_MESSAGE, email)
        # The real recipients appear in the informational body ...
        self.assertIn('[email protected],[email protected]', email)
        # ... but must never leak into a Bcc header.
        self.assertNotIn('Bcc: [email protected],[email protected]', email)
        self.flush_mailbox()
        send_email('[email protected]', '[email protected], [email protected]',
                   subject='Subject', content='Content')
        email = self.stream.getvalue()
        self.assertIn(msg_content, email)
        self.assertIn(self.ADMIN_MESSAGE, email)
        self.assertIn('[email protected],[email protected]', email)
        self.assertNotIn('Bcc: [email protected],[email protected]', email)
        self.flush_mailbox()
# Aggregate all mail test cases so the invenio test runner can discover them.
TEST_SUITE = make_test_suite(TestMailUtils, TestAdminMailBackend)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
sholtebeck/knarflog | lib/werkzeug/formparser.py | 3 | 21790 | # -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import re
from functools import update_wrapper
from itertools import chain
from itertools import repeat
from itertools import tee
from ._compat import BytesIO
from ._compat import text_type
from ._compat import to_native
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
from .http import parse_options_header
from .urls import url_decode_stream
from .wsgi import get_content_length
from .wsgi import get_input_stream
from .wsgi import make_line_iter
# there are some platforms where SpooledTemporaryFile is not available.
# In that case we need to provide a fallback.
try:
    from tempfile import SpooledTemporaryFile
except ImportError:
    from tempfile import TemporaryFile

    # Sentinel: ``default_stream_factory`` checks this for ``None`` to decide
    # between the spooled and the plain temporary-file code paths.
    SpooledTemporaryFile = None


#: an iterator that yields empty strings
_empty_string_iter = repeat("")

#: a regular expression for multipart boundaries (printable ASCII, at most
#: 201 characters, not ending in whitespace)
_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$")

#: supported http encodings that are also available in python we support
#: for multipart messages.
_supported_multipart_encodings = frozenset(["base64", "quoted-printable"])
def default_stream_factory(
    total_content_length, filename, content_type, content_length=None
):
    """Default stream factory.

    Returns a spooled temporary file when available; otherwise an in-memory
    buffer for small payloads and an anonymous temporary file for large or
    unknown-length payloads.
    """
    spool_limit = 1024 * 500
    if SpooledTemporaryFile is not None:
        return SpooledTemporaryFile(max_size=spool_limit, mode="wb+")
    fits_in_memory = (
        total_content_length is not None and total_content_length <= spool_limit
    )
    if fits_in_memory:
        return BytesIO()
    return TemporaryFile("wb+")
def parse_form_data(
    environ,
    stream_factory=None,
    charset="utf-8",
    errors="replace",
    max_form_memory_size=None,
    max_content_length=None,
    cls=None,
    silent=True,
):
    """Parse the form data in the environ and return it as tuple in the form
    ``(stream, form, files)``. You should only call this method if the
    transport method is `POST`, `PUT`, or `PATCH`.

    If the mimetype of the data transmitted is `multipart/form-data` the
    files multidict will be filled with `FileStorage` objects. If the
    mimetype is unknown the input stream is wrapped and returned as first
    argument, else the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.

    Have a look at :ref:`dealing-with-request-data` for more details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    parser = FormDataParser(
        stream_factory=stream_factory,
        charset=charset,
        errors=errors,
        max_form_memory_size=max_form_memory_size,
        max_content_length=max_content_length,
        cls=cls,
        silent=silent,
    )
    return parser.parse_from_environ(environ)
def exhaust_stream(f):
    """Decorator for stream-consuming methods that guarantees the stream is
    fully drained when the wrapped method returns, even on error.

    If the stream exposes an ``exhaust()`` method it is used; otherwise the
    stream is read in 64 KiB chunks until EOF.
    """

    def _drain(stream):
        # Prefer the stream's own fast-path exhaust hook when present.
        exhaust = getattr(stream, "exhaust", None)
        if exhaust is not None:
            exhaust()
            return
        while stream.read(64 * 1024):
            pass

    def wrapper(self, stream, *args, **kwargs):
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            _drain(stream)

    return update_wrapper(wrapper, f)
class FormDataParser(object):
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    .. versionadded:: 0.8

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """

    def __init__(
        self,
        stream_factory=None,
        charset="utf-8",
        errors="replace",
        max_form_memory_size=None,
        max_content_length=None,
        cls=None,
        silent=True,
    ):
        if stream_factory is None:
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        if cls is None:
            cls = MultiDict
        self.cls = cls
        self.silent = silent

    def get_parse_func(self, mimetype, options):
        # Returns a plain (unbound) function from ``parse_functions``; the
        # caller is expected to pass ``self`` explicitly when invoking it
        # (see :meth:`parse`).
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ):
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get("CONTENT_TYPE", "")
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype, content_length, options)

    def parse(self, stream, mimetype, content_length, options=None):
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        if (
            self.max_content_length is not None
            and content_length is not None
            and content_length > self.max_content_length
        ):
            # ``exceptions`` is imported at the bottom of this module to
            # avoid a circular import.
            raise exceptions.RequestEntityTooLarge()
        if options is None:
            options = {}

        parse_func = self.get_parse_func(mimetype, options)
        if parse_func is not None:
            try:
                return parse_func(self, stream, mimetype, content_length, options)
            except ValueError:
                if not self.silent:
                    raise

        # Unknown mimetype (or silenced parse error): empty form/files, and
        # the untouched stream is handed back to the caller.
        return stream, self.cls(), self.cls()

    @exhaust_stream
    def _parse_multipart(self, stream, mimetype, content_length, options):
        # Delegate to MultiPartParser; the boundary comes from the
        # Content-Type header options and must be ASCII bytes.
        parser = MultiPartParser(
            self.stream_factory,
            self.charset,
            self.errors,
            max_form_memory_size=self.max_form_memory_size,
            cls=self.cls,
        )
        boundary = options.get("boundary")
        if boundary is None:
            raise ValueError("Missing boundary")
        if isinstance(boundary, text_type):
            boundary = boundary.encode("ascii")
        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    @exhaust_stream
    def _parse_urlencoded(self, stream, mimetype, content_length, options):
        if (
            self.max_form_memory_size is not None
            and content_length is not None
            and content_length > self.max_form_memory_size
        ):
            raise exceptions.RequestEntityTooLarge()
        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
        return stream, form, self.cls()

    #: mapping of mimetypes to parsing functions
    parse_functions = {
        "multipart/form-data": _parse_multipart,
        "application/x-www-form-urlencoded": _parse_urlencoded,
        "application/x-url-encoded": _parse_urlencoded,
    }
def is_valid_multipart_boundary(boundary):
    """Return ``True`` if the given string is a valid multipart boundary."""
    return bool(_multipart_boundary_re.match(boundary))
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ["\r\n", b"\r\n"]:
return line[:-2], True
elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol). The iterable has to be newline terminated.

    The iterable will stop at the line where the headers ended so it can be
    further consumed.

    :param iterable: iterable of strings that are newline terminated
    """
    fields = []
    for raw_line in iterable:
        raw_line = to_native(raw_line)
        stripped, terminated = _line_parse(raw_line)
        if not terminated:
            raise ValueError("unexpected end of line in multipart header")
        if not stripped:
            # Blank line terminates the header section.
            break
        if stripped.startswith((" ", "\t")) and fields:
            # Continuation line: fold into the previous header's value.
            last_key, last_value = fields[-1]
            fields[-1] = (last_key, last_value + "\n " + stripped[1:])
        else:
            key, sep, value = stripped.partition(":")
            if sep:
                fields.append((key.strip(), value.strip()))
    # we link the list to the headers, no need to create a copy, the
    # list was not shared anyways.
    return Headers(fields)
#: Sentinel event names yielded by ``MultiPartParser.parse_lines`` and
#: consumed by ``MultiPartParser.parse_parts``.
_begin_form = "begin_form"
_begin_file = "begin_file"
_cont = "cont"
_end = "end"
class MultiPartParser(object):
    """Parses ``multipart/form-data`` streams into ``(form, files)``
    multidicts (see :meth:`parse`); file parts are wrapped in
    :class:`FileStorage` objects obtained from ``stream_factory``.
    """

    def __init__(
        self,
        stream_factory=None,
        charset="utf-8",
        errors="replace",
        max_form_memory_size=None,
        cls=None,
        buffer_size=64 * 1024,
    ):
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.stream_factory = (
            default_stream_factory if stream_factory is None else stream_factory
        )
        self.cls = MultiDict if cls is None else cls

        # make sure the buffer size is divisible by four so that we can base64
        # decode chunk by chunk
        assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
        # also the buffer size has to be at least 1024 bytes long or long headers
        # will freak out the system
        assert buffer_size >= 1024, "buffer size has to be at least 1KB"

        self.buffer_size = buffer_size

    def _fix_ie_filename(self, filename):
        """Internet Explorer 6 transmits the full file name if a file is
        uploaded. This function strips the full path if it thinks the
        filename is Windows-like absolute.
        """
        if filename[1:3] == ":\\" or filename[:2] == "\\\\":
            return filename.split("\\")[-1]
        return filename

    def _find_terminator(self, iterator):
        """The terminator might have some additional newlines before it.
        There is at least one application that sends additional newlines
        before headers (the python setuptools package).
        """
        for line in iterator:
            if not line:
                break
            line = line.strip()
            if line:
                return line
        return b""

    def fail(self, message):
        # Central failure hook: every parse error surfaces as a ValueError.
        raise ValueError(message)

    def get_part_encoding(self, headers):
        # Return the part's Content-Transfer-Encoding, but only if it is one
        # of the encodings we can decode (see
        # ``_supported_multipart_encodings``); otherwise ``None``.
        transfer_encoding = headers.get("content-transfer-encoding")
        if (
            transfer_encoding is not None
            and transfer_encoding in _supported_multipart_encodings
        ):
            return transfer_encoding

    def get_part_charset(self, headers):
        # Figure out input charset for current part
        content_type = headers.get("content-type")
        if content_type:
            mimetype, ct_params = parse_options_header(content_type)
            return ct_params.get("charset", self.charset)
        return self.charset

    def start_file_streaming(self, filename, headers, total_content_length):
        # Normalize the filename and ask the stream factory for a writable
        # container for this file part. Returns ``(filename, container)``.
        if isinstance(filename, bytes):
            filename = filename.decode(self.charset, self.errors)
        filename = self._fix_ie_filename(filename)
        content_type = headers.get("content-type")
        try:
            content_length = int(headers["content-length"])
        except (KeyError, ValueError):
            # Missing or malformed per-part Content-Length: treat as unknown.
            content_length = 0
        container = self.stream_factory(
            total_content_length=total_content_length,
            filename=filename,
            content_type=content_type,
            content_length=content_length,
        )
        return filename, container

    def in_memory_threshold_reached(self, bytes):
        # Hook invoked when in-memory form data exceeds
        # ``max_form_memory_size``.
        raise exceptions.RequestEntityTooLarge()

    def validate_boundary(self, boundary):
        if not boundary:
            self.fail("Missing boundary")
        if not is_valid_multipart_boundary(boundary):
            self.fail("Invalid boundary: %s" % boundary)
        if len(boundary) > self.buffer_size:  # pragma: no cover
            # this should never happen because we check for a minimum size
            # of 1024 and boundaries may not be longer than 200. The only
            # situation when this happens is for non debug builds where
            # the assert is skipped.
            self.fail("Boundary longer than buffer size")

    def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
        """Generate parts of
        ``('begin_form', (headers, name))``
        ``('begin_file', (headers, name, filename))``
        ``('cont', bytestring)``
        ``('end', None)``

        Always obeys the grammar
        parts = ( begin_form cont* end |
                  begin_file cont* end )*
        """
        next_part = b"--" + boundary
        last_part = next_part + b"--"

        # Chain an endless supply of empty strings behind the real lines so
        # the loop below can detect a premature end of stream.
        iterator = chain(
            make_line_iter(
                file,
                limit=content_length,
                buffer_size=self.buffer_size,
                cap_at_buffer=cap_at_buffer,
            ),
            _empty_string_iter,
        )

        terminator = self._find_terminator(iterator)

        if terminator == last_part:
            return
        elif terminator != next_part:
            self.fail("Expected boundary at start of multipart data")

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get("content-disposition")
            if disposition is None:
                self.fail("Missing Content-Disposition header")
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get("name")
            filename = extra.get("filename")

            # if no content type is given we stream into memory. A list is
            # used as a temporary container.
            if filename is None:
                yield _begin_form, (headers, name)

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                yield _begin_file, (headers, name, filename)

            buf = b""
            for line in iterator:
                if not line:
                    self.fail("unexpected end of stream")

                if line[:2] == b"--":
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    if transfer_encoding == "base64":
                        transfer_encoding = "base64_codec"
                    try:
                        line = codecs.decode(line, transfer_encoding)
                    except Exception:
                        self.fail("could not decode transfer encoded chunk")

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    yield _cont, buf
                    buf = b""

                # If the line ends with windows CRLF we write everything except
                # the last two bytes. In all other cases however we write
                # everything except the last byte. If it was a newline, that's
                # fine, otherwise it does not matter because we will write it
                # the next iteration. this ensures we do not write the
                # final newline into the stream. That way we do not have to
                # truncate the stream. However we do have to make sure that
                # if something else than a newline is in there we write it
                # out.
                if line[-2:] == b"\r\n":
                    buf = b"\r\n"
                    cutoff = -2
                else:
                    buf = line[-1:]
                    cutoff = -1

                yield _cont, line[:cutoff]

            else:  # pragma: no cover
                raise ValueError("unexpected end of part")

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop of
            # certain values.
            if buf not in (b"", b"\r", b"\n", b"\r\n"):
                yield _cont, buf

            yield _end, None

    def parse_parts(self, file, boundary, content_length):
        """Generate ``('file', (name, val))`` and
        ``('form', (name, val))`` parts.
        """
        in_memory = 0

        for ellt, ell in self.parse_lines(file, boundary, content_length):
            if ellt == _begin_file:
                headers, name, filename = ell
                is_file = True
                guard_memory = False
                filename, container = self.start_file_streaming(
                    filename, headers, content_length
                )
                _write = container.write

            elif ellt == _begin_form:
                headers, name = ell
                is_file = False
                container = []
                _write = container.append
                guard_memory = self.max_form_memory_size is not None

            elif ellt == _cont:
                _write(ell)

                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(ell)

                    if in_memory > self.max_form_memory_size:
                        self.in_memory_threshold_reached(in_memory)

            elif ellt == _end:
                if is_file:
                    container.seek(0)
                    yield (
                        "file",
                        (name, FileStorage(container, filename, name, headers=headers)),
                    )
                else:
                    part_charset = self.get_part_charset(headers)
                    yield (
                        "form",
                        (name, b"".join(container).decode(part_charset, self.errors)),
                    )

    def parse(self, file, boundary, content_length):
        """Parse the stream and return ``(form, files)`` as two instances of
        ``self.cls`` (a :class:`MultiDict` by default)."""
        # ``tee`` lets the two comprehensions below consume the single
        # parse_parts() generator independently.
        formstream, filestream = tee(
            self.parse_parts(file, boundary, content_length), 2
        )
        form = (p[1] for p in formstream if p[0] == "form")
        files = (p[1] for p in filestream if p[0] == "file")
        return self.cls(form), self.cls(files)
from . import exceptions
| apache-2.0 |
apark263/tensorflow | tensorflow/contrib/summary/summary.py | 23 | 3781 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Summary API v2.
The operations in this package are safe to use with eager execution turned on or
off. It has a more flexible API that allows summaries to be written directly
from ops to places other than event log files, rather than propagating protos
from `tf.summary.merge_all` to `tf.summary.FileWriter`.
To use with eager execution enabled, write your code as follows:
```python
global_step = tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
# model code goes here
# and in it call
tf.contrib.summary.scalar("loss", my_loss)
# In this case every call to tf.contrib.summary.scalar will generate a record
# ...
```
To use it with graph execution, write your code as follows:
```python
global_step = tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
# model definition code goes here
# and in it call
tf.contrib.summary.scalar("loss", my_loss)
# In this case every call to tf.contrib.summary.scalar will generate an op,
# note the need to run tf.contrib.summary.all_summary_ops() to make sure these
# ops get executed.
# ...
train_op = ....
with tf.Session(...) as sess:
tf.global_variables_initializer().run()
tf.contrib.summary.initialize(graph=tf.get_default_graph())
# ...
while not_done_training:
sess.run([train_op, tf.contrib.summary.all_summary_ops()])
# ...
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.summary_ops_v2 import all_summary_ops
from tensorflow.python.ops.summary_ops_v2 import always_record_summaries
from tensorflow.python.ops.summary_ops_v2 import audio
from tensorflow.python.ops.summary_ops_v2 import create_db_writer
from tensorflow.python.ops.summary_ops_v2 import create_file_writer
from tensorflow.python.ops.summary_ops_v2 import create_summary_file_writer
from tensorflow.python.ops.summary_ops_v2 import eval_dir
from tensorflow.python.ops.summary_ops_v2 import flush
from tensorflow.python.ops.summary_ops_v2 import generic
from tensorflow.python.ops.summary_ops_v2 import graph
from tensorflow.python.ops.summary_ops_v2 import histogram
from tensorflow.python.ops.summary_ops_v2 import image
from tensorflow.python.ops.summary_ops_v2 import import_event
from tensorflow.python.ops.summary_ops_v2 import initialize
from tensorflow.python.ops.summary_ops_v2 import never_record_summaries
from tensorflow.python.ops.summary_ops_v2 import record_summaries_every_n_global_steps
from tensorflow.python.ops.summary_ops_v2 import scalar
from tensorflow.python.ops.summary_ops_v2 import should_record_summaries
from tensorflow.python.ops.summary_ops_v2 import summary_writer_initializer_op
from tensorflow.python.ops.summary_ops_v2 import SummaryWriter
| apache-2.0 |
rbtcollins/pip | tests/lib/git_submodule_helpers.py | 58 | 2960 | from __future__ import absolute_import
import textwrap
def _create_test_package_submodule(env):
    """Create and commit a one-file git repository used as a submodule
    fixture, returning its path."""
    submodule_path = env.scratch_path / 'version_pkg_submodule'
    submodule_path.mkdir()
    env.run('touch', 'testfile', cwd=submodule_path)
    env.run('git', 'init', cwd=submodule_path)
    env.run('git', 'add', '.', cwd=submodule_path)
    env.run('git', 'commit', '-q',
            '--author', 'pip <[email protected]>',
            '-am', 'initial version / submodule', cwd=submodule_path)
    return submodule_path
def _change_test_package_submodule(env, submodule_path):
    """Modify the submodule checkout (change one file, add another) and
    commit the result."""
    for name, text in (("testfile", "this is a changed file"),
                       ("testfile2", "this is an added file")):
        submodule_path.join(name).write(text)
    env.run('git', 'add', '.', cwd=submodule_path)
    env.run('git', 'commit', '-q',
            '--author', 'pip <[email protected]>',
            '-am', 'submodule change', cwd=submodule_path)
def _pull_in_submodule_changes_to_module(env, module_path):
    """Pull the latest submodule commits into the parent repository's
    checkout and commit the updated submodule pointer."""
    submodule_checkout = module_path / 'testpkg/static/'
    env.run('git', 'pull', '-q', 'origin', 'master', cwd=submodule_checkout)
    env.run('git', 'commit', '-q',
            '--author', 'pip <[email protected]>',
            '-am', 'submodule change', cwd=module_path)
def _create_test_package_with_submodule(env):
    """Create a 'version_pkg' git repository containing a small installable
    package, with a fresh submodule mounted at ``testpkg/static``.

    Returns ``(version_pkg_path, submodule_path)``.
    """
    env.scratch_path.join("version_pkg").mkdir()
    version_pkg_path = env.scratch_path / 'version_pkg'
    version_pkg_path.join("testpkg").mkdir()
    pkg_path = version_pkg_path / 'testpkg'

    pkg_path.join("__init__.py").write("# hello there")
    pkg_path.join("version_pkg.py").write(textwrap.dedent('''\
        def main():
            print('0.1')
    '''))
    version_pkg_path.join("setup.py").write(textwrap.dedent('''\
        from setuptools import setup, find_packages
        setup(name='version_pkg',
              version='0.1',
              packages=find_packages(),
             )
    '''))
    # expect_error=True: git may warn on stderr in these sandboxed repos;
    # the helpers tolerate that rather than failing the fixture setup.
    env.run('git', 'init', cwd=version_pkg_path, expect_error=True)
    env.run('git', 'add', '.', cwd=version_pkg_path, expect_error=True)
    env.run('git', 'commit', '-q',
            '--author', 'pip <[email protected]>',
            '-am', 'initial version', cwd=version_pkg_path,
            expect_error=True)

    submodule_path = _create_test_package_submodule(env)

    env.run(
        'git',
        'submodule',
        'add',
        submodule_path,
        'testpkg/static',
        cwd=version_pkg_path,
        expect_error=True,
    )
    env.run('git', 'commit', '-q',
            '--author', 'pip <[email protected]>',
            '-am', 'initial version w submodule', cwd=version_pkg_path,
            expect_error=True)

    return version_pkg_path, submodule_path
| mit |
vicky2135/lucious | oscar/lib/python2.7/site-packages/prompt_toolkit/token.py | 23 | 1420 | """
The Token class, interchangeable with ``pygments.token``.
A `Token` has some semantics for a piece of text that is given a style through
a :class:`~prompt_toolkit.styles.Style` class. A pygments lexer for instance,
returns a list of (Token, text) tuples. Each fragment of text has a token
assigned, which when combined with a style sheet, will determine the fine
style.
"""
# If we don't need any lexers or style classes from Pygments, we don't want
# Pygments to be installed for only the following 10 lines of code. So, there
# is some duplication, but this should stay compatible with Pygments.
__all__ = (
'Token',
'ZeroWidthEscape',
)
class _TokenType(tuple):
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
# Prefer the Token class from Pygments. If Pygments is not installed, use our
# minimalistic Token class.
try:
    from pygments.token import Token
except ImportError:
    # Root token: an empty _TokenType whose attribute access builds children.
    Token = _TokenType()


# Built-in tokens:

#: `ZeroWidthEscape` can be used for raw VT escape sequences that don't
#: cause the cursor position to move. (E.g. FinalTerm's escape sequences
#: for shell integration.)
ZeroWidthEscape = Token.ZeroWidthEscape
jeffmahoney/crash-python | crash/commands/syscmd.py | 1 | 2069 | # -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
"""
SUMMARY
-------
Display system information and configuration data
::
sys [config]
DESCRIPTION
-----------
This command displays system-specific data. If no arguments are entered,
the same system data shown during crash invocation is shown.
``config`` If the kernel was configured with ``CONFIG_IKCONFIG``, then
dump the in-kernel configuration data.
EXAMPLES
--------
Display essential system information:
::
py-crash> sys config
KERNEL: vmlinux.4
DUMPFILE: lcore.cr.4
CPUS: 4
DATE: Mon Oct 11 18:48:55 1999
UPTIME: 10 days, 14:14:39
LOAD AVERAGE: 0.74, 0.23, 0.08
TASKS: 77
NODENAME: test.mclinux.com
RELEASE: 2.2.5-15smp
VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999
MACHINE: i686 (500 MHz)
MEMORY: 1 GB
"""
import argparse
from crash.commands import Command, ArgumentParser
from crash.commands import CommandLineError
from crash.cache.syscache import utsname, config, kernel
class SysCommand(Command):
    """system data"""

    def __init__(self, name: str) -> None:
        argparser = ArgumentParser(prog=name)
        argparser.add_argument('config', nargs='?')
        Command.__init__(self, name, argparser)

    @staticmethod
    def show_default() -> None:
        """Print the one-line-per-field system summary."""
        rows = (
            ("UPTIME", kernel.uptime),
            ("LOAD AVERAGE", kernel.loadavg),
            ("NODENAME", utsname.nodename),
            ("RELEASE", utsname.release),
            ("VERSION", utsname.version),
            ("MACHINE", utsname.machine),
        )
        for label, value in rows:
            print(f"{label:>12}: {value}")

    def execute(self, args: argparse.Namespace) -> None:
        """Handle ``sys`` (summary) and ``sys config`` (in-kernel config)."""
        if not args.config:
            self.show_default()
            return
        if args.config != "config":
            raise CommandLineError(f"error: unknown option: {args.config}")
        print(config)
| gpl-2.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/startup-notification/package.py | 5 | 1756 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class StartupNotification(AutotoolsPackage):
    """startup-notification contains a reference implementation of the
    freedesktop startup notification protocol."""

    homepage = "https://www.freedesktop.org/wiki/Software/startup-notification/"
    url = "http://www.freedesktop.org/software/startup-notification/releases/startup-notification-0.12.tar.gz"

    # Checksum is the release tarball's md5.
    version('0.12', '2cd77326d4dcaed9a5a23a1232fb38e9')

    # X11 client-side libraries needed to build and link.
    depends_on('libx11')
    depends_on('libxcb')
    depends_on('xcb-util')
| lgpl-2.1 |
FMCalisto/FMCalisto.github.io | node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 65 | 4989 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
import locale
def XmlToString(content, encoding='utf-8', pretty=False):
  """ Converts the structured content into its XML string representation.

  NOTE(review): the original docstring claimed this writes to disk; it does
  not -- see WriteXmlIfChanged for the variant that touches the file system.

  Visual Studio files have a lot of pre-defined structures.  This function makes
  it easy to represent these structures as Python data structures, instead of
  having to create a lot of function calls.

  Each XML element of the content is represented as a list composed of:
  1. The name of the element, a string,
  2. The attributes of the element, a dictionary (optional), and
  3+. The content of the element, if any.  Strings are simple text nodes and
      lists are child elements.

  Example 1:
      <test/>
  becomes
      ['test']

  Example 2:
      <myelement a='value1' b='value2'>
         <childtype>This is</childtype>
         <childtype>it!</childtype>
      </myelement>
  becomes
      ['myelement', {'a':'value1', 'b':'value2'},
         ['childtype', 'This is'],
         ['childtype', 'it!'],
      ]

  Args:
    content: The structured content to be converted.
    encoding: The encoding to report on the first XML line.
    pretty: True if we want pretty printing with indents and new lines.

  Returns:
    The XML content as a string.
  """
  # We create a huge list of all the elements of the file.
  xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
  if pretty:
    xml_parts.append('\n')
  _ConstructContentList(xml_parts, content, pretty)

  # Convert it to a string
  return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.

  Args:
    content: The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
    win32: True to force CRLF line endings regardless of os.linesep.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')

  # locale.getdefaultlocale() may return (None, None) (e.g. unset or
  # malformed LANG); only transcode when a real encoding is reported,
  # otherwise the original code crashed on None.upper().
  default_encoding = locale.getdefaultlocale()[1]
  if default_encoding and default_encoding.upper() != encoding.upper():
    xml_string = xml_string.decode(default_encoding).encode(encoding)

  # Get the old content.  A missing/unreadable file simply counts as
  # "changed".  The original bare `except:` also swallowed KeyboardInterrupt
  # and friends; only I/O failures are expected here.
  try:
    with open(path, 'r') as f:
      existing = f.read()
  except (IOError, OSError):
    existing = None

  # It has changed, write it
  if existing != xml_string:
    with open(path, 'w') as f:
      f.write(xml_string)
# Characters that must be escaped when embedded in XML text or attribute
# values, mapped to their entity spellings.
# NOTE(review): in the source as extracted, this table had its entities
# decoded back into the literal characters themselves (e.g. '"': '"'),
# which would make _XmlEscape a no-op; restored to the standard XML
# predefined entities and numeric character references.
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}


# Single alternation matching any one character that needs escaping.
_xml_escape_re = re.compile(
    "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))


def _XmlEscape(value, attr=False):
  """ Escape a string for inclusion in XML."""
  def replace(match):
    m = match.string[match.start() : match.end()]
    # don't replace single quotes in attrs
    if attr and m == "'":
      return m
    return _xml_escape_map[m]
  return _xml_escape_re.sub(replace, value)
| mit |
grevutiu-gabriel/sympy | sympy/geometry/tests/test_plane.py | 36 | 7702 | from __future__ import division
from sympy import (Abs, I, Dummy, Rational, Float, S, Symbol, cos, oo, pi,
simplify, sin, sqrt, symbols, Derivative, asin, acos)
from sympy.geometry import (Circle, Curve, Ellipse, GeometryError, Line, Point,
Polygon, Ray, RegularPolygon, Segment, Triangle,
are_similar, convex_hull, intersection,
Point3D, Line3D, Ray3D, Segment3D, Plane, centroid)
from sympy.geometry.util import are_coplanar
from sympy.utilities.pytest import raises, slow
# Real-valued symbols shared by the geometry tests in this module.
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
t = Symbol('t', real=True)
k = Symbol('k', real=True)
# Generic coordinate symbols for building arbitrary 2D/3D points.
x1 = Symbol('x1', real=True)
x2 = Symbol('x2', real=True)
x3 = Symbol('x3', real=True)
y1 = Symbol('y1', real=True)
y2 = Symbol('y2', real=True)
y3 = Symbol('y3', real=True)
z1 = Symbol('z1', real=True)
z2 = Symbol('z2', real=True)
z3 = Symbol('z3', real=True)
half = Rational(1, 2)  # exact symbolic 1/2, avoids float artifacts
def feq(a, b):
    """Return True when a and b agree to within a fixed absolute tolerance."""
    eps = Float("1.0E-10")
    diff = a - b
    return -eps < diff < eps
@slow
def test_plane():
    """Exercise Plane: construction/equality, projections, parallel and
    perpendicular relationships, distances, angles and intersections."""
    p1 = Point3D(0, 0, 0)
    p2 = Point3D(1, 1, 1)
    p3 = Point3D(1, 2, 3)
    p4 = Point3D(x, x, x)
    p5 = Point3D(y, y, y)

    pl3 = Plane(p1, p2, p3)
    pl4 = Plane(p1, normal_vector=(1, 1, 1))
    pl4b = Plane(p1, p2)
    pl5 = Plane(p3, normal_vector=(1, 2, 3))
    pl6 = Plane(Point3D(2, 3, 7), normal_vector=(2, 2, 2))
    pl7 = Plane(Point3D(1, -5, -6), normal_vector=(1, -2, 1))

    l1 = Line3D(Point3D(5, 0, 0), Point3D(1, -1, 1))
    l2 = Line3D(Point3D(0, -2, 0), Point3D(3, 1, 1))
    l3 = Line3D(Point3D(0, -1, 0), Point3D(5, -1, 9))

    assert Plane(p1, p2, p3) != Plane(p1, p3, p2)
    assert Plane(p1, p2, p3).is_coplanar(Plane(p1, p3, p2))
    assert pl3 == Plane(Point3D(0, 0, 0), normal_vector=(1, -2, 1))
    assert pl3 != pl4
    assert pl4 == pl4b
    assert pl5 == Plane(Point3D(1, 2, 3), normal_vector=(1, 2, 3))

    assert pl5.equation(x, y, z) == x + 2*y + 3*z - 14
    assert pl3.equation(x, y, z) == x - 2*y + z

    assert pl3.p1 == p1
    assert pl4.p1 == p1
    assert pl5.p1 == p3

    assert pl4.normal_vector == (1, 1, 1)
    assert pl5.normal_vector == (1, 2, 3)

    assert p1 in pl3
    assert p1 in pl4
    assert p3 in pl5

    assert pl3.projection(Point(0, 0)) == p1
    p = pl3.projection(Point3D(1, 1, 0))
    assert p == Point3D(7/6, 2/3, 1/6)
    assert p in pl3

    l = pl3.projection_line(Line(Point(0, 0), Point(1, 1)))
    assert l == Line3D(Point3D(0, 0, 0), Point3D(7/6, 2/3, 1/6))
    assert l in pl3

    # get a segment that does not intersect the plane which is also
    # parallel to pl3's normal vector
    t = Dummy()
    r = pl3.random_point()
    a = pl3.perpendicular_line(r).arbitrary_point(t)
    s = Segment3D(a.subs(t, 1), a.subs(t, 2))
    assert s.p1 not in pl3 and s.p2 not in pl3
    assert pl3.projection_line(s).equals(r)

    assert pl3.projection_line(Segment(Point(1, 0), Point(1, 1))) == \
        Segment3D(Point3D(5/6, 1/3, -1/6), Point3D(7/6, 2/3, 1/6))
    assert pl6.projection_line(Ray(Point(1, 0), Point(1, 1))) == \
        Ray3D(Point3D(14/3, 11/3, 11/3), Point3D(13/3, 13/3, 10/3))
    assert pl3.perpendicular_line(r.args) == pl3.perpendicular_line(r)

    assert pl3.is_parallel(pl6) is False
    assert pl4.is_parallel(pl6)
    assert pl6.is_parallel(l1) is False

    assert pl3.is_perpendicular(pl6)
    assert pl4.is_perpendicular(pl7)
    assert pl6.is_perpendicular(pl7)
    assert pl6.is_perpendicular(l1) is False

    assert pl7.distance(Point3D(1, 3, 5)) == 5*sqrt(6)/6
    assert pl6.distance(Point3D(0, 0, 0)) == 4*sqrt(3)
    assert pl6.distance(pl6.p1) == 0
    assert pl7.distance(pl6) == 0
    assert pl7.distance(l1) == 0
    assert pl6.distance(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == 0
    # BUG FIX: this line was a bare comparison statement (no `assert`), so
    # its result was silently discarded and the check never ran.  The two
    # parallel planes are x+y+z=12 and x+y+z=15, i.e. sqrt(3) apart.
    assert pl6.distance(Plane(Point3D(5, 5, 5), normal_vector=(8, 8, 8))) == sqrt(3)

    assert pl6.angle_between(pl3) == pi/2
    assert pl6.angle_between(pl6) == 0
    assert pl6.angle_between(pl4) == 0
    assert pl7.angle_between(Line3D(Point3D(2, 3, 5), Point3D(2, 4, 6))) == \
        -asin(sqrt(3)/6)
    assert pl6.angle_between(Ray3D(Point3D(2, 4, 1), Point3D(6, 5, 3))) == \
        asin(sqrt(7)/3)
    assert pl7.angle_between(Segment3D(Point3D(5, 6, 1), Point3D(1, 2, 4))) == \
        -asin(7*sqrt(246)/246)

    assert are_coplanar(l1, l2, l3) is False
    assert are_coplanar(l1) is False
    assert are_coplanar(Point3D(2, 7, 2), Point3D(0, 0, 2),
                        Point3D(1, 1, 2), Point3D(1, 2, 2))
    assert are_coplanar(Plane(p1, p2, p3), Plane(p1, p3, p2))
    assert Plane.are_concurrent(pl3, pl4, pl5) is False
    assert Plane.are_concurrent(pl6) is False
    raises(ValueError, lambda: Plane.are_concurrent(Point3D(0, 0, 0)))

    assert pl3.parallel_plane(Point3D(1, 2, 5)) == Plane(Point3D(1, 2, 5),
                                                         normal_vector=(1, -2, 1))

    # perpendicular_plane
    p = Plane((0, 0, 0), (1, 0, 0))
    # default
    assert p.perpendicular_plane() == Plane(Point3D(0, 0, 0), (0, 1, 0))
    # 1 pt
    assert p.perpendicular_plane(Point3D(1, 0, 1)) == \
        Plane(Point3D(1, 0, 1), (0, 1, 0))
    # pts as tuples
    assert p.perpendicular_plane((1, 0, 1), (1, 1, 1)) == \
        Plane(Point3D(1, 0, 1), (0, 0, -1))

    a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
    Z = (0, 0, 1)
    p = Plane(a, normal_vector=Z)
    # case 4
    assert p.perpendicular_plane(a, b) == Plane(a, (1, 0, 0))
    n = Point3D(*Z)
    # case 1
    assert p.perpendicular_plane(a, n) == Plane(a, (-1, 0, 0))
    # case 2
    assert Plane(a, normal_vector=b.args).perpendicular_plane(a, a + b) == \
        Plane(Point3D(0, 0, 0), (1, 0, 0))
    # case 1&3
    assert Plane(b, normal_vector=Z).perpendicular_plane(b, b + n) == \
        Plane(Point3D(0, 1, 0), (-1, 0, 0))
    # case 2&3
    assert Plane(b, normal_vector=b.args).perpendicular_plane(n, n + b) == \
        Plane(Point3D(0, 0, 1), (1, 0, 0))

    assert pl6.intersection(pl6) == [pl6]
    assert pl4.intersection(pl4.p1) == [pl4.p1]
    assert pl3.intersection(pl6) == [
        Line3D(Point3D(8, 4, 0), Point3D(2, 4, 6))]
    assert pl3.intersection(Line3D(Point3D(1, 2, 4), Point3D(4, 4, 2))) == [
        Point3D(2, 8/3, 10/3)]
    assert pl3.intersection(Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
        ) == [Line3D(Point3D(-24, -12, 0), Point3D(-25, -13, -1))]
    assert pl6.intersection(Ray3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
        Point3D(-1, 3, 10)]
    assert pl6.intersection(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
        Point3D(-1, 3, 10)]
    assert pl7.intersection(Line(Point(2, 3), Point(4, 2))) == [
        Point3D(13/2, 3/4, 0)]
    r = Ray(Point(2, 3), Point(4, 2))
    assert Plane((1, 2, 0), normal_vector=(0, 0, 1)).intersection(r) == [
        Ray3D(Point(2, 3), Point(4, 2))]
    assert pl3.random_point() in pl3

    # issue 8570
    l2 = Line3D(Point3D(S(50000004459633)/5000000000000,
                        -S(891926590718643)/1000000000000000,
                        S(231800966893633)/100000000000000),
                Point3D(S(50000004459633)/50000000000000,
                        -S(222981647679771)/250000000000000,
                        S(231800966893633)/100000000000000))
    p2 = Plane(Point3D(S(402775636372767)/100000000000000,
                       -S(97224357654973)/100000000000000,
                       S(216793600814789)/100000000000000),
               (-S('9.00000087501922'), -S('4.81170658872543e-13'),
                S('0.0')))
    assert str([i.n(2) for i in p2.intersection(l2)]) == \
        '[Point3D(4.0, -0.89, 2.3)]'
| bsd-3-clause |
rvalera01/platalist | cloudflare.py | 221 | 2812 | import sys,traceback,urllib2,re, urllib,xbmc
def createCookie(url,cj=None,agent='Mozilla/5.0 (Windows NT 6.1; rv:32.0) Gecko/20100101 Firefox/32.0'):
    """Fetch *url*, solving Cloudflare's JavaScript anti-bot challenge.

    Cookies obtained along the way (including the clearance cookie) are
    stored in *cj*, so subsequent requests with the same jar pass straight
    through.  Returns the raw body of the first response, or '' on failure.

    NOTE(review): Python 2 only (urllib2/cookielib/urlparse).  Uses eval()
    on arithmetic scraped from the challenge page; the regex rewrites below
    constrain it, but treat the input as untrusted.
    """
    urlData=''
    try:
        import urlparse,cookielib,urllib2

        class NoRedirection(urllib2.HTTPErrorProcessor):
            # Pass error responses through untouched: the Cloudflare
            # challenge page is served with an error status that urllib2
            # would otherwise raise on before we could read it.
            def http_response(self, request, response):
                return response

        def parseJSString(s):
            # Evaluate one obfuscated-JS arithmetic snippet by rewriting
            # its idioms ('!+[]' -> 1, '!![]' -> 1, '[]' -> 0) into Python.
            # Returns None (via the silent except) if the snippet changes
            # shape -- the caller will then fail loudly on int arithmetic.
            try:
                offset=1 if s[0]=='+' else 0
                val = int(eval(s.replace('!+[]','1').replace('!![]','1').replace('[]','0').replace('(','str(')[offset:]))
                return val
            except:
                pass

        #agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'
        if cj==None:
            cj = cookielib.CookieJar()
        opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [('User-Agent', agent)]
        response = opener.open(url)
        result=urlData = response.read()
        response.close()
        # print result
        # print response.headers
        # Scrape the challenge token, the seed expression and the sequence
        # of follow-up operations from the challenge page.
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decryptVal = parseJSString(init)
        lines = builder.split(';')
        # Fold each 'x?=...' operation into the running value; the operator
        # is the last character before '=' (+, -, *, ...).
        for line in lines:
            if len(line)>0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))
        # print urlparse.urlparse(url).netloc
        # Cloudflare expects the computed value plus the hostname length.
        answer = decryptVal + len(urlparse.urlparse(url).netloc)
        u='/'.join(url.split('/')[:-1])
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (u, jschl, answer)
        if 'type="hidden" name="pass"' in result:
            passval=re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (u,urllib.quote_plus(passval), jschl, answer)
            xbmc.sleep(4*1000) ##sleep so that the call work
        # print query
        # import urllib2
        # opener = urllib2.build_opener(NoRedirection,urllib2.HTTPCookieProcessor(cj))
        # opener.addheaders = [('User-Agent', agent)]
        #print opener.headers
        # Submitting the answer makes Cloudflare set the clearance cookie
        # in cj as a side effect; the response body itself is discarded.
        response = opener.open(query)
        # print response.headers
        #cookie = str(response.headers.get('Set-Cookie'))
        #response = opener.open(url)
        #print cj
        # print response.read()
        response.close()
        return urlData
    except:
        traceback.print_exc(file=sys.stdout)
        return urlData
| gpl-2.0 |
dstftw/youtube-dl | youtube_dl/extractor/r7.py | 53 | 4600 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import int_or_none
class R7IE(InfoExtractor):
    # Matches R7 media pages, noticias article URLs and the bare player;
    # all three carry a 24-hex-digit media id.
    _VALID_URL = r'''(?x)
                    https?://
                    (?:
                        (?:[a-zA-Z]+)\.r7\.com(?:/[^/]+)+/idmedia/|
                        noticias\.r7\.com(?:/[^/]+)+/[^/]+-|
                        player\.r7\.com/video/i/
                    )
                    (?P<id>[\da-f]{24})
                    '''
    _TESTS = [{
        'url': 'http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html',
        'md5': '403c4e393617e8e8ddc748978ee8efde',
        'info_dict': {
            'id': '54e7050b0cf2ff57e0279389',
            'ext': 'mp4',
            'title': 'Policiais humilham suspeito à beira da morte: "Morre com dignidade"',
            'description': 'md5:01812008664be76a6479aa58ec865b72',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 98,
            'like_count': int,
            'view_count': int,
        },
    }, {
        'url': 'http://esportes.r7.com/videos/cigano-manda-recado-aos-fas/idmedia/4e176727b51a048ee6646a1b.html',
        'only_matching': True,
    }, {
        'url': 'http://noticias.r7.com/record-news/video/representante-do-instituto-sou-da-paz-fala-sobre-fim-do-estatuto-do-desarmamento-5480fc580cf2285b117f438d/',
        'only_matching': True,
    }, {
        'url': 'http://player.r7.com/video/i/54e7050b0cf2ff57e0279389?play=true&video=http://vsh.r7.com/54e7050b0cf2ff57e0279389/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-ATOS_copy.mp4&linkCallback=http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html&thumbnail=http://vtb.r7.com/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-thumb.jpg&idCategory=192&share=true&layout=full&full=true',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # The player API returns all metadata and media URLs as JSON.
        video = self._download_json(
            'http://player-api.r7.com/video/i/%s' % video_id, video_id)

        title = video['title']

        formats = []
        media_url_hls = video.get('media_url_hls')
        if media_url_hls:
            formats.extend(self._extract_m3u8_formats(
                media_url_hls, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))
        media_url = video.get('media_url')
        if media_url:
            f = {
                'url': media_url,
                'format_id': 'http',
            }
            # m3u8 format always matches the http format, let's copy metadata from
            # one to another
            m3u8_formats = list(filter(
                lambda f: f.get('vcodec') != 'none', formats))
            if len(m3u8_formats) == 1:
                f_copy = m3u8_formats[0].copy()
                f_copy.update(f)
                f_copy['protocol'] = 'http'
                f = f_copy
            formats.append(f)
        self._sort_formats(formats)

        # Remaining fields are optional in the API response.
        description = video.get('description')
        thumbnail = video.get('thumb')
        duration = int_or_none(video.get('media_duration'))
        like_count = int_or_none(video.get('likes'))
        view_count = int_or_none(video.get('views'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'like_count': like_count,
            'view_count': view_count,
            'formats': formats,
        }
class R7ArticleIE(InfoExtractor):
    # Article pages whose slug ends in a numeric id; the actual media id is
    # scraped from the embedded player markup and delegated to R7IE.
    _VALID_URL = r'https?://(?:[a-zA-Z]+)\.r7\.com/(?:[^/]+/)+[^/?#&]+-(?P<id>\d+)'
    _TEST = {
        'url': 'http://tv.r7.com/record-play/balanco-geral/videos/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-16102015',
        'only_matching': True,
    }

    @classmethod
    def suitable(cls, url):
        # Defer to R7IE whenever it already claims the URL.
        return False if R7IE.suitable(url) else super(R7ArticleIE, cls).suitable(url)

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # 24-hex-digit media id embedded in the player <div>.
        video_id = self._search_regex(
            r'<div[^>]+(?:id=["\']player-|class=["\']embed["\'][^>]+id=["\'])([\da-f]{24})',
            webpage, 'video id')

        return self.url_result('http://player.r7.com/video/i/%s' % video_id, R7IE.ie_key())
| unlicense |
jrbl/invenio | modules/webmessage/lib/webmessage.py | 20 | 19732 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" WebMessage module, messaging system"""
__revision__ = "$Id$"
import invenio.webmessage_dblayer as db
from invenio.webmessage_config import CFG_WEBMESSAGE_STATUS_CODE, \
CFG_WEBMESSAGE_RESULTS_FIELD, \
CFG_WEBMESSAGE_SEPARATOR, \
CFG_WEBMESSAGE_ROLES_WITHOUT_QUOTA, \
InvenioWebMessageError
from invenio.config import CFG_SITE_LANG, \
CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE
from invenio.messages import gettext_set_language
from invenio.dateutils import datetext_default, get_datetext
from invenio.htmlutils import escape_html
from invenio.webuser import list_users_in_roles
try:
import invenio.template
webmessage_templates = invenio.template.load('webmessage')
except:
pass
from invenio.errorlib import register_exception
def perform_request_display_msg(uid, msgid, ln=CFG_SITE_LANG):
    """
    Displays a specific message
    @param uid: user id
    @param msgid: message id
    @param ln: language
    @return: body (HTML page body, or an error page if the message cannot
             be shown)
    """
    _ = gettext_set_language(ln)
    body = ""
    if (db.check_user_owns_message(uid, msgid) == 0):
        # The user doesn't own this message
        try:
            raise InvenioWebMessageError(_('Sorry, this message in not in your mailbox.'))
        except InvenioWebMessageError, exc:
            # raise/except so that register_exception() records a traceback
            register_exception()
            body = webmessage_templates.tmpl_error(exc.message, ln)
            return body
    else:
        (msg_id,
         msg_from_id, msg_from_nickname,
         msg_sent_to, msg_sent_to_group,
         msg_subject, msg_body,
         msg_sent_date, msg_received_date,
         msg_status) = db.get_message(uid, msgid)
        if (msg_id == ""):
            # The message exists in table user_msgMESSAGE
            # but not in table msgMESSAGE => table inconsistency
            try:
                raise InvenioWebMessageError(_('This message does not exist.'))
            except InvenioWebMessageError, exc:
                register_exception()
                body = webmessage_templates.tmpl_error(exc.message, ln)
                return body
        else:
            # First view of a NEW message flips its status to READ.
            if (msg_status == CFG_WEBMESSAGE_STATUS_CODE['NEW']):
                db.set_message_status(uid, msgid,
                                      CFG_WEBMESSAGE_STATUS_CODE['READ'])
            body = webmessage_templates.tmpl_display_msg(
                msg_id,
                msg_from_id,
                msg_from_nickname,
                msg_sent_to,
                msg_sent_to_group,
                msg_subject,
                msg_body,
                msg_sent_date,
                msg_received_date,
                ln)
            return body
def perform_request_display(uid, warnings=None, infos=None, ln=CFG_SITE_LANG):
    """
    Displays the user's Inbox
    @param uid: user id
    @param warnings: warning messages to show (list of str), or None
    @param infos: informational messages to show (list of str), or None
    @param ln: language
    @return: body with warnings
    """
    # BUG FIX: the defaults were the shared mutable objects `[]`; callers
    # passing lists are unaffected by switching to None sentinels.
    if warnings is None:
        warnings = []
    if infos is None:
        infos = []
    rows = db.get_all_messages_for_user(uid)
    nb_messages = db.count_nb_messages(uid)
    # Users holding one of the privileged roles are exempt from the
    # mailbox quota.
    no_quota_users = list_users_in_roles(CFG_WEBMESSAGE_ROLES_WITHOUT_QUOTA)
    no_quota = uid in no_quota_users
    body = webmessage_templates.tmpl_display_inbox(messages=rows,
                                                   infos=infos,
                                                   warnings=warnings,
                                                   nb_messages=nb_messages,
                                                   no_quota=no_quota,
                                                   ln=ln)
    return body
def perform_request_delete_msg(uid, msgid, ln=CFG_SITE_LANG):
    """
    Delete a given message from user inbox
    @param uid: user id (int)
    @param msgid: message id (int)
    @param ln: language
    @return: body with warnings (the re-rendered inbox, or an error page
             when the message is not owned by the user)
    """
    _ = gettext_set_language(ln)
    warnings = []
    infos = []
    body = ""
    if (db.check_user_owns_message(uid, msgid) == 0):
        # The user doesn't own this message
        try:
            raise InvenioWebMessageError(_('Sorry, this message in not in your mailbox.'))
        except InvenioWebMessageError, exc:
            # raise/except so that register_exception() records a traceback
            register_exception()
            body = webmessage_templates.tmpl_error(exc.message, ln)
            return body
    else:
        if (db.delete_message_from_user_inbox(uid, msgid) == 0):
            warnings.append(_("The message could not be deleted."))
        else:
            infos.append(_("The message was successfully deleted."))
    return perform_request_display(uid, warnings, infos, ln)
def perform_request_delete_all(uid, confirmed=False, ln=CFG_SITE_LANG):
    """
    Delete every message for a given user
    @param uid: user id (int)
    @param confirmed: falsy value will produce a confirmation page instead
    @param ln: language
    @return: body with warnings
    """
    _ = gettext_set_language(ln)
    if not confirmed:
        # First pass: ask the user to confirm before wiping the mailbox.
        return webmessage_templates.tmpl_confirm_delete(ln)
    db.delete_all_messages(uid)
    success_infos = [_("Your mailbox has been emptied.")]
    return perform_request_display(uid, [], success_infos, ln)
def perform_request_write(uid,
                          msg_reply_id="",
                          msg_to="",
                          msg_to_group="",
                          msg_subject="",
                          msg_body="",
                          ln=CFG_SITE_LANG):
    """
    Display a write a message page.

    @param uid: user id.
    @type uid: int
    @param msg_reply_id: if this message is a reply to another, other's ID.
    @type msg_reply_id: int
    @param msg_to: comma separated usernames.
    @type msg_to: string
    @param msg_to_group: comma separated groupnames.
    @type msg_to_group: string
    @param msg_subject: message subject.
    @type msg_subject: string
    @param msg_body: message body.
    @type msg_body: string
    @param ln: language.
    @type ln: string
    @return: body with warnings.
    """
    warnings = []
    body = ""
    _ = gettext_set_language(ln)
    msg_from_nickname = ""
    msg_id = 0
    if (msg_reply_id):
        # Replying: prefill subject/body/recipient from the quoted message.
        if (db.check_user_owns_message(uid, msg_reply_id) == 0):
            # The user doesn't own this message
            try:
                raise InvenioWebMessageError(_('Sorry, this message in not in your mailbox.'))
            except InvenioWebMessageError, exc:
                # raise/except so register_exception() records a traceback
                register_exception()
                body = webmessage_templates.tmpl_error(exc.message, ln)
                return body
        else:
            # dummy == variable name to make pylint and pychecker happy!
            (msg_id,
             msg_from_id, msg_from_nickname,
             dummy, dummy,
             msg_subject, msg_body,
             dummy, dummy, dummy) = db.get_message(uid, msg_reply_id)
            if (msg_id == ""):
                # The message exists in table user_msgMESSAGE
                # but not in table msgMESSAGE => table inconsistency
                try:
                    raise InvenioWebMessageError(_('This message does not exist.'))
                except InvenioWebMessageError, exc:
                    register_exception()
                    body = webmessage_templates.tmpl_error(exc.message, ln)
                    return body
            else:
                # Address the reply to the sender's nickname, falling back
                # to the numeric uid when no nickname is set.
                msg_to = msg_from_nickname or str(msg_from_id)

    body = webmessage_templates.tmpl_write(msg_to=msg_to,
                                           msg_to_group=msg_to_group,
                                           msg_id=msg_id,
                                           msg_subject=msg_subject,
                                           msg_body=msg_body,
                                           warnings=[],
                                           ln=ln)
    return body
def perform_request_write_with_search(
    uid,
    msg_to_user="",
    msg_to_group="",
    msg_subject="",
    msg_body="",
    msg_send_year=0,
    msg_send_month=0,
    msg_send_day=0,
    names_selected=None,
    search_pattern="",
    results_field=CFG_WEBMESSAGE_RESULTS_FIELD['NONE'],
    add_values=0,
    ln=CFG_SITE_LANG):
    """
    Display a write message page, with prefilled values
    @param uid: user id (int)
    @param msg_to_user: comma separated usernames (str)
    @param msg_to_group: comma separated groupnames (str)
    @param msg_subject: message subject (str)
    @param msg_body: message body (str)
    @param msg_send_year: year to send this message on (int)
    @param msg_send_month: month to send this message on (int)
    @param msg_send_day: day to send this message on (int)
    @param names_selected: user/group names already ticked in the search
           results (list of str)
    @param search_pattern: will search users or groups with this pattern (str)
    @param results_field: which kind of search results to show, one of
           CFG_WEBMESSAGE_RESULTS_FIELD (USER/GROUP/NONE)
    @param add_values: if 1 the selected names will be appended to the
           corresponding recipients field
    @param ln: language
    @return: body with warnings
    """
    # BUG FIX: 'names_selected=[]' was a shared mutable default argument.
    if names_selected is None:
        names_selected = []
    warnings = []
    search_results_list = []

    def cat_names(name1, name2):
        """ name1, name2 => 'name1, name2' """
        return name1 + CFG_WEBMESSAGE_SEPARATOR + " " + name2

    if results_field == CFG_WEBMESSAGE_RESULTS_FIELD['USER']:
        if add_values and len(names_selected):
            usernames_to_add = reduce(cat_names, names_selected)
            if msg_to_user:
                msg_to_user = cat_names(msg_to_user, usernames_to_add)
            else:
                msg_to_user = usernames_to_add

        users_found = db.get_nicknames_like(search_pattern)
        if users_found:
            for user_name in users_found:
                search_results_list.append((user_name[0],
                                            user_name[0] in names_selected))
    elif results_field == CFG_WEBMESSAGE_RESULTS_FIELD['GROUP']:
        if add_values and len(names_selected):
            groupnames_to_add = reduce(cat_names, names_selected)
            if msg_to_group:
                msg_to_group = cat_names(msg_to_group, groupnames_to_add)
            else:
                msg_to_group = groupnames_to_add
        groups_dict = db.get_groupnames_like(uid, search_pattern)
        groups_found = groups_dict.values()
        if groups_found:
            for group_name in groups_found:
                search_results_list.append((group_name,
                                            group_name in names_selected))
    body = webmessage_templates.tmpl_write(
        msg_to=msg_to_user,
        msg_to_group=msg_to_group,
        msg_subject=msg_subject,
        msg_body=msg_body,
        msg_send_year=msg_send_year,
        msg_send_month=msg_send_month,
        msg_send_day=msg_send_day,
        warnings=warnings,
        search_results_list=search_results_list,
        search_pattern=search_pattern,
        results_field=results_field,
        ln=ln)
    return body
def perform_request_send(uid,
                         msg_to_user="",
                         msg_to_group="",
                         msg_subject="",
                         msg_body="",
                         msg_send_year=0,
                         msg_send_month=0,
                         msg_send_day=0,
                         ln=CFG_SITE_LANG,
                         use_email_address = 0):
    """
    send a message. if unable return warnings to write page
    @param uid: id of user from (int)
    @param msg_to_user: comma separated usernames (recipients) (str)
    @param msg_to_group: comma separated groupnames (recipeints) (str)
    @param msg_subject: subject of message (str)
    @param msg_body: body of message (str)
    @param msg_send_year: send this message on year x (int)
    @param msg_send_month: send this message on month y (int)
    @param msg_send_day: send this message on day z (int)
    @param ln: language
    @param use_email_address: if 1, recipients are e-mail addresses instead
           of nicknames
    @return: (body with warnings, title, navtrail)
    """
    _ = gettext_set_language(ln)

    def strip_spaces(text):
        """suppress spaces before and after x (str)"""
        return text.strip()

    # wash user input
    users_to = map(strip_spaces, msg_to_user.split(CFG_WEBMESSAGE_SEPARATOR))
    groups_to = map(strip_spaces, msg_to_group.split(CFG_WEBMESSAGE_SEPARATOR))
    # str.split on an empty string yields [''], which would otherwise be
    # treated as one empty recipient.
    if users_to == ['']:
        users_to = []
    if groups_to == ['']:
        groups_to = []
    warnings = []
    infos = []
    problem = None
    users_to_str = CFG_WEBMESSAGE_SEPARATOR.join(users_to)
    groups_to_str = CFG_WEBMESSAGE_SEPARATOR.join(groups_to)
    send_on_date = get_datetext(msg_send_year, msg_send_month, msg_send_day)
    # An all-zero date means "send now"; anything else schedules a REMINDER.
    if (msg_send_year == msg_send_month == msg_send_day == 0):
        status = CFG_WEBMESSAGE_STATUS_CODE['NEW']
    else:
        status = CFG_WEBMESSAGE_STATUS_CODE['REMINDER']
        if send_on_date == datetext_default:
            # get_datetext() returned its fallback value => invalid date.
            warning = \
                _("The chosen date (%(x_year)i/%(x_month)i/%(x_day)i) is invalid.")
            warning = warning % {'x_year': msg_send_year,
                                 'x_month': msg_send_month,
                                 'x_day': msg_send_day}
            warnings.append(warning)
            problem = True
    if not(users_to_str or groups_to_str):
        # <=> not(users_to_str) AND not(groups_to_str)
        warnings.append(_("Please enter a user name or a group name."))
        problem = True
    if len(msg_body) > CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE:
        warnings.append(_("Your message is too long, please edit it. Maximum size allowed is %i characters.") % \
                        (CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE,))
        problem = True
    # Resolve recipients to uids, by nickname or by e-mail address.
    if use_email_address == 0:
        users_dict = db.get_uids_from_nicks(users_to)
        users_to = users_dict.items() # users_to=[(nick, uid),(nick2, uid2)]
    elif use_email_address == 1:
        users_dict = db.get_uids_from_emails(users_to)
        users_to = users_dict.items() # users_to=[(email, uid),(email2, uid2)]
    groups_dict = db.get_gids_from_groupnames(groups_to)
    groups_to = groups_dict.items()
    gids_to = []
    for (group_name, group_id) in groups_to:
        if not(group_id):
            warnings.append(_("Group %s does not exist.") % \
                            (escape_html(group_name)))
            problem = 1
        else:
            gids_to.append(group_id)

    # Get uids from gids
    uids_from_group = db.get_uids_members_of_groups(gids_to)
    # Add the original uids, and make sure there is no double values.
    # tmp_dict is used as a "seen" set (Python 2: no set literal here).
    tmp_dict = {}
    for uid_receiver in uids_from_group:
        tmp_dict[uid_receiver] = None
    for (user_nick, user_id) in users_to:
        if user_id:
            if user_id not in tmp_dict:
                uids_from_group.append(user_id)
                tmp_dict[user_id] = None
        else:
            # Unresolved nick: a purely numeric "nickname" may actually be
            # a raw uid typed by the sender.
            if type(user_nick) == int or \
               type(user_nick) == str and user_nick.isdigit():
                user_nick = int(user_nick)
                if db.user_exists(user_nick) and user_nick not in tmp_dict:
                    uids_from_group.append(user_nick)
                    tmp_dict[user_nick] = None
            else:
                warnings.append(_("User %s does not exist.")% \
                                (escape_html(user_nick)))
                problem = True
    if problem:
        # Validation failed: re-render the write page with the warnings
        # and everything the sender already typed.
        body = webmessage_templates.tmpl_write(msg_to=users_to_str,
                                               msg_to_group=groups_to_str,
                                               msg_subject=msg_subject,
                                               msg_body=msg_body,
                                               msg_send_year=msg_send_year,
                                               msg_send_month=msg_send_month,
                                               msg_send_day=msg_send_day,
                                               warnings=warnings,
                                               ln=ln)
        title = _("Write a message")
        navtrail = get_navtrail(ln, title)
        return (body, title, navtrail)
    else:
        msg_id = db.create_message(uid,
                                   users_to_str, groups_to_str,
                                   msg_subject, msg_body,
                                   send_on_date)
        # send_message returns the uids whose quota prevented delivery.
        uid_problem = db.send_message(uids_from_group, msg_id, status)
        if len(uid_problem) > 0:
            usernames_problem_dict = db.get_nicks_from_uids(uid_problem)
            usernames_problem = usernames_problem_dict.values()

            def listing(name1, name2):
                """ name1, name2 => 'name1, name2' """
                return str(name1) + ", " + str(name2)

            warning = _("Your message could not be sent to the following recipients due to their quota:") + " "
            warnings.append(warning + reduce(listing, usernames_problem))

        if len(uids_from_group) != len(uid_problem):
            infos.append(_("Your message has been sent."))
        else:
            # Nobody received it: garbage-collect the orphaned message row.
            db.check_if_need_to_delete_message_permanently([msg_id])
        body = perform_request_display(uid, warnings,
                                       infos, ln)
        title = _("Your Messages")
        return (body, title, get_navtrail(ln))
def account_new_mail(uid, ln=CFG_SITE_LANG):
    """
    display new mail info for myaccount.py page.
    @param uid: user id (int)
    @param ln: language
    @return: html body
    """
    unread_count = db.get_nb_new_messages_for_user(uid)
    readable_count = db.get_nb_readable_messages_for_user(uid)
    return webmessage_templates.tmpl_account_new_mail(unread_count,
                                                      readable_count, ln)
def get_navtrail(ln=CFG_SITE_LANG, title=""):
    """
    gets the navtrail for title...
    @param title: title of the page
    @param ln: language
    @return: HTML output
    """
    return webmessage_templates.tmpl_navtrail(ln, title)
| gpl-2.0 |
peterlauri/django | django/utils/dateformat.py | 7 | 11927 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import calendar
import datetime
import re
import time
from django.utils import six
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.encoding import force_text
from django.utils.timezone import get_default_timezone, is_aware, is_naive
from django.utils.translation import ugettext as _
# A format character is any of these letters NOT preceded by a backslash;
# a backslash escapes the character so it is emitted literally (the escape
# backslash itself is stripped later by `re_escaped`).
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
    """Base class implementing the PHP date()-style format loop.

    Subclasses expose one zero-argument method per format character
    (e.g. ``H``, ``i``); :meth:`format` dispatches to them by name.
    """

    def format(self, formatstr):
        output = []
        for index, token in enumerate(re_formatchars.split(force_text(formatstr))):
            if index % 2 == 0:
                # Even pieces are literal text between format characters;
                # strip the escape backslashes (r'\H' renders a literal 'H').
                if token:
                    output.append(re_escaped.sub(r'\1', token))
            else:
                # Odd pieces are single format characters.  Plain dates
                # may not use time-only specifiers.
                if type(self.data) is datetime.date and hasattr(TimeFormat, token):
                    raise TypeError(
                        "The format for date objects may not contain "
                        "time-related format specifiers (found '%s')." % token
                    )
                output.append(force_text(getattr(self, token)()))
        return ''.join(output)
class TimeFormat(Formatter):
    """
    Formatter for time-of-day format specifiers.

    Works on ``datetime.time`` and ``datetime.datetime`` objects; it is
    also the base class of ``DateFormat``.  Timezone-dependent specifiers
    (e, O, T, Z) return '' when no timezone information is available.
    """

    def __init__(self, obj):
        # `data` is the time/datetime being formatted.
        self.data = obj
        self.timezone = None

        # We only support timezone when formatting datetime objects,
        # not date objects (timezone information not appropriate),
        # or time objects (against established django policy).
        if isinstance(obj, datetime.datetime):
            if is_naive(obj):
                self.timezone = get_default_timezone()
            else:
                self.timezone = obj.tzinfo

    def a(self):
        "'a.m.' or 'p.m.'"
        if self.data.hour > 11:
            return _('p.m.')
        return _('a.m.')

    def A(self):
        "'AM' or 'PM'"
        if self.data.hour > 11:
            return _('PM')
        return _('AM')

    def B(self):
        "Swatch Internet time"
        raise NotImplementedError('may be implemented in a future release')

    def e(self):
        """
        Timezone name.

        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""

        try:
            if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
                # Have to use tzinfo.tzname and not datetime.tzname
                # because datetime.tzname does not expect Unicode
                return self.data.tzinfo.tzname(self.data) or ""
        except NotImplementedError:
            pass
        return ""

    def f(self):
        """
        Time, in 12-hour hours and minutes, with minutes left off if they're
        zero.
        Examples: '1', '1:30', '2:05', '2'
        Proprietary extension.
        """
        if self.data.minute == 0:
            return self.g()
        return '%s:%s' % (self.g(), self.i())

    def g(self):
        "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
        if self.data.hour == 0:
            return 12
        if self.data.hour > 12:
            return self.data.hour - 12
        return self.data.hour

    def G(self):
        "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
        return self.data.hour

    def h(self):
        "Hour, 12-hour format; i.e. '01' to '12'"
        return '%02d' % self.g()

    def H(self):
        "Hour, 24-hour format; i.e. '00' to '23'"
        return '%02d' % self.G()

    def i(self):
        "Minutes; i.e. '00' to '59'"
        return '%02d' % self.data.minute

    def O(self):  # NOQA: E743
        """
        Difference to Greenwich time in hours; e.g. '+0200', '-0430'.

        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""

        seconds = self.Z()
        if seconds == "":
            return ""
        sign = '-' if seconds < 0 else '+'
        seconds = abs(seconds)
        return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)

    def P(self):
        """
        Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
        if they're zero and the strings 'midnight' and 'noon' if appropriate.
        Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
        Proprietary extension.
        """
        if self.data.minute == 0 and self.data.hour == 0:
            return _('midnight')
        if self.data.minute == 0 and self.data.hour == 12:
            return _('noon')
        return '%s %s' % (self.f(), self.a())

    def s(self):
        "Seconds; i.e. '00' to '59'"
        return '%02d' % self.data.second

    def T(self):
        """
        Time zone of this machine; e.g. 'EST' or 'MDT'.

        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""

        name = None
        try:
            name = self.timezone.tzname(self.data)
        except Exception:
            # pytz raises AmbiguousTimeError during the autumn DST change.
            # This happens mainly when __init__ receives a naive datetime
            # and sets self.timezone = get_default_timezone().
            pass
        if name is None:
            # Fall back to the numeric UTC offset (e.g. '+0200').
            name = self.format('O')
        return six.text_type(name)

    def u(self):
        "Microseconds; i.e. '000000' to '999999'"
        return '%06d' % self.data.microsecond

    def Z(self):
        """
        Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
        timezones west of UTC is always negative, and for those east of UTC is
        always positive.

        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""

        try:
            offset = self.timezone.utcoffset(self.data)
        except Exception:
            # pytz raises AmbiguousTimeError during the autumn DST change.
            # This happens mainly when __init__ receives a naive datetime
            # and sets self.timezone = get_default_timezone().
            return ""

        # `offset` is a datetime.timedelta. For negative values (to the west of
        # UTC) only days can be negative (days=-1) and seconds are always
        # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
        # Positive offsets have days=0
        return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
    """
    Formatter for date (and datetime) format specifiers.

    Extends ``TimeFormat``, so a ``DateFormat`` built from a datetime can
    mix date- and time-related format characters.
    """

    # Cumulative day-of-year at the start of each month (index 0 unused);
    # leap years are corrected for in z().
    year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]

    def b(self):
        "Month, textual, 3 letters, lowercase; e.g. 'jan'"
        return MONTHS_3[self.data.month]

    def c(self):
        """
        ISO 8601 Format
        Example : '2008-01-02T10:30:00.000123'
        """
        return self.data.isoformat()

    def d(self):
        "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
        return '%02d' % self.data.day

    def D(self):
        "Day of the week, textual, 3 letters; e.g. 'Fri'"
        return WEEKDAYS_ABBR[self.data.weekday()]

    def E(self):
        "Alternative month names as required by some locales. Proprietary extension."
        return MONTHS_ALT[self.data.month]

    def F(self):
        "Month, textual, long; e.g. 'January'"
        return MONTHS[self.data.month]

    def I(self):  # NOQA: E743
        "'1' if Daylight Savings Time, '0' otherwise."
        try:
            if self.timezone and self.timezone.dst(self.data):
                return '1'
            else:
                return '0'
        except Exception:
            # pytz raises AmbiguousTimeError during the autumn DST change.
            # This happens mainly when __init__ receives a naive datetime
            # and sets self.timezone = get_default_timezone().
            return ''

    def j(self):
        "Day of the month without leading zeros; i.e. '1' to '31'"
        return self.data.day

    def l(self):  # NOQA: E743
        "Day of the week, textual, long; e.g. 'Friday'"
        return WEEKDAYS[self.data.weekday()]

    def L(self):
        "Boolean for whether it is a leap year; i.e. True or False"
        return calendar.isleap(self.data.year)

    def m(self):
        "Month; i.e. '01' to '12'"
        return '%02d' % self.data.month

    def M(self):
        "Month, textual, 3 letters; e.g. 'Jan'"
        return MONTHS_3[self.data.month].title()

    def n(self):
        "Month without leading zeros; i.e. '1' to '12'"
        return self.data.month

    def N(self):
        "Month abbreviation in Associated Press style. Proprietary extension."
        return MONTHS_AP[self.data.month]

    def o(self):
        "ISO 8601 year number matching the ISO week number (W)"
        return self.data.isocalendar()[0]

    def r(self):
        "RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
        return self.format('D, j M Y H:i:s O')

    def S(self):
        "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
        if self.data.day in (11, 12, 13):  # Special case
            return 'th'
        last = self.data.day % 10
        if last == 1:
            return 'st'
        if last == 2:
            return 'nd'
        if last == 3:
            return 'rd'
        return 'th'

    def t(self):
        "Number of days in the given month; i.e. '28' to '31'"
        return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]

    def U(self):
        "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
        if isinstance(self.data, datetime.datetime) and is_aware(self.data):
            return int(calendar.timegm(self.data.utctimetuple()))
        else:
            return int(time.mktime(self.data.timetuple()))

    def w(self):
        "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
        return (self.data.weekday() + 1) % 7

    def W(self):
        "ISO-8601 week number of year, weeks starting on Monday"
        # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
        week_number = None
        jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
        weekday = self.data.weekday() + 1
        day_of_year = self.z()
        if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
            # Falls in the last ISO week of the previous year.
            if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
                week_number = 53
            else:
                week_number = 52
        else:
            if calendar.isleap(self.data.year):
                i = 366
            else:
                i = 365
            if (i - day_of_year) < (4 - weekday):
                # Falls in the first ISO week of the next year.
                week_number = 1
            else:
                j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
                week_number = j // 7
                if jan1_weekday > 4:
                    week_number -= 1
        return week_number

    def y(self):
        "Year, 2 digits with leading zeros; e.g. '99'"
        # Bug fix: the previous implementation,
        #     six.text_type(self.data.year)[2:]
        # produced wrong results for years < 1000 (year 999 -> '9',
        # year 9 -> '').  PHP's date('y'), which this module emulates,
        # always zero-pads the last two digits.
        return '%02d' % (self.data.year % 100)

    def Y(self):
        "Year, 4 digits; e.g. '1999'"
        return self.data.year

    def z(self):
        "Day of the year; i.e. '0' to '365'"
        doy = self.year_days[self.data.month] + self.data.day
        if self.L() and self.data.month > 2:
            # After February in a leap year, shift by the leap day.
            doy += 1
        return doy
def format(value, format_string):
    "Convenience function: format `value` (a date/datetime) with `format_string`."
    return DateFormat(value).format(format_string)
def time_format(value, format_string):
    "Convenience function: format `value` (a time/datetime) with `format_string`."
    return TimeFormat(value).format(format_string)
| bsd-3-clause |
triveous/LearnFlask | flask/lib/python2.7/site-packages/flask/templating.py | 783 | 4707 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
    """Default template context processor.  Injects `request`,
    `session` and `g`.
    """
    context = {}
    app_ctx = _app_ctx_stack.top
    req_ctx = _request_ctx_stack.top
    # Either context may be absent (e.g. rendering outside a request).
    if app_ctx is not None:
        context['g'] = app_ctx.g
    if req_ctx is not None:
        context['request'] = req_ctx.request
        context['session'] = req_ctx.session
    return context
class Environment(BaseEnvironment):
    """A Jinja2 environment that additionally knows about Flask
    blueprints, so blueprint-local template names can be resolved.
    """

    def __init__(self, app, **options):
        # Only fall back to the app's global loader when the caller did
        # not pass one at all (an explicit ``loader=None`` is respected).
        if 'loader' not in options:
            options['loader'] = app.create_global_jinja_loader()
        super(Environment, self).__init__(**options)
        self.app = app
class DispatchingJinjaLoader(BaseLoader):
    """A loader that looks for templates in the application and all
    the blueprint folders.
    """

    def __init__(self, app):
        self.app = app

    def get_source(self, environment, template):
        # Try each candidate loader in resolution order; the first one
        # that can provide the (possibly blueprint-local) name wins.
        for loader, local_name in self._iter_loaders(template):
            try:
                return loader.get_source(environment, local_name)
            except TemplateNotFound:
                pass

        raise TemplateNotFound(template)

    def _iter_loaders(self, template):
        # Yields (loader, template_name) pairs: the application loader
        # first, then module-style blueprints matched by name prefix,
        # then every new-style blueprint loader.
        loader = self.app.jinja_loader
        if loader is not None:
            yield loader, template

        # old style module based loaders in case we are dealing with a
        # blueprint that is an old style module
        try:
            module, local_name = posixpath.normpath(template).split('/', 1)
            blueprint = self.app.blueprints[module]
            if blueprint_is_module(blueprint):
                loader = blueprint.jinja_loader
                if loader is not None:
                    yield loader, local_name
        except (ValueError, KeyError):
            # ValueError: template name contains no '/' prefix;
            # KeyError: the prefix does not name a registered blueprint.
            pass

        for blueprint in itervalues(self.app.blueprints):
            if blueprint_is_module(blueprint):
                continue
            loader = blueprint.jinja_loader
            if loader is not None:
                yield loader, template

    def list_templates(self):
        # Union of the application's templates and each blueprint's,
        # with module-style blueprint templates prefixed by their name.
        result = set()
        loader = self.app.jinja_loader
        if loader is not None:
            result.update(loader.list_templates())

        for name, blueprint in iteritems(self.app.blueprints):
            loader = blueprint.jinja_loader
            if loader is not None:
                for template in loader.list_templates():
                    prefix = ''
                    if blueprint_is_module(blueprint):
                        prefix = name + '/'
                    result.add(prefix + template)

        return list(result)
def _render(template, context, app):
    """Renders the template, fires the `template_rendered` signal,
    and returns the rendered text.
    """
    rendered = template.render(context)
    # Signal must fire after rendering so receivers see the final context.
    template_rendered.send(app, template=template, context=context)
    return rendered
def render_template(template_name_or_list, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable with template names
                                  the first one existing will be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    # Let the app inject its default context (request, session, g, ...).
    ctx.app.update_template_context(context)
    template = ctx.app.jinja_env.get_or_select_template(template_name_or_list)
    return _render(template, context, ctx.app)
def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context.

    :param source: the sourcecode of the template to be
                   rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    # Let the app inject its default context (request, session, g, ...).
    ctx.app.update_template_context(context)
    template = ctx.app.jinja_env.from_string(source)
    return _render(template, context, ctx.app)
| apache-2.0 |
kyroskoh/js-test-tool | js_test_tool/tests/test_suite.py | 3 | 26335 | import unittest
import mock
import os
import os.path
from StringIO import StringIO
import yaml
import copy
from textwrap import dedent
from lxml import etree
from js_test_tool.tests.helpers import TempWorkspaceTestCase
from js_test_tool.suite import SuiteDescription, SuiteDescriptionError, \
SuiteRenderer, SuiteRendererError
class SuiteDescriptionTest(TempWorkspaceTestCase):
    """
    Tests for `SuiteDescription`: parsing the suite YAML description
    and resolving lib/src/spec/fixture paths inside the temporary
    workspace created by `TempWorkspaceTestCase`.
    """

    # Temporary directory paths to be created within our root temp dir
    TEMP_DIRS = ['src/subdir', 'spec/subdir', 'lib/subdir',
                 'src/empty', 'spec/empty', 'lib/empty',
                 'other_src', 'other_spec', 'other_lib',
                 'fixtures', 'single_file']

    # Test files to create. Paths specified relative to the root temp dir.
    LIB_FILES = ['lib/1.js', 'lib/2.js', 'lib/subdir/3.js',
                 'other_lib/test.js',
                 'single_file/lib.js']

    SRC_FILES = ['src/1.js', 'src/2.js', 'src/subdir/3.js',
                 'other_src/test.js',
                 'single_file/src.js']

    SPEC_FILES = ['spec/1.js', 'spec/2.js', 'spec/subdir/3.js',
                  'other_spec/test.js',
                  'single_file/spec.js']

    FIXTURE_FILES = ['fixtures/fix1.html', 'fixtures/fix2.html',
                     'single_file/fix.html']

    # Non-JS files that the description should silently ignore.
    IGNORE_FILES = ['src/ignore.txt', 'spec/ignore.txt', 'lib/ignore.txt']

    # Valid data used to create the YAML file describing the test suite
    YAML_DATA = {'test_suite_name': 'test_suite',
                 'lib_paths': ['lib', 'other_lib', 'single_file/lib.js'],
                 'src_paths': ['src', 'other_src', 'single_file/src.js'],
                 'spec_paths': ['spec', 'other_spec', 'single_file/spec.js'],
                 'fixture_paths': ['fixtures', 'single_file/fix.html'],
                 'test_runner': 'jasmine'}

    def setUp(self):
        """
        Generate fake JS files in a temporary directory.
        """
        # Call the superclass implementation to create the temp workspace
        super(SuiteDescriptionTest, self).setUp()

        # Create subdirectories for dependency, source, and spec files
        # Because we are using `makedirs()`, the intermediate directories
        # will also be created.
        for dir_path in self.TEMP_DIRS:
            os.makedirs(os.path.join(self.temp_dir, dir_path))

        # Create the test files
        all_files = (self.LIB_FILES + self.SRC_FILES
                     + self.SPEC_FILES + self.FIXTURE_FILES
                     + self.IGNORE_FILES)

        for file_path in all_files:
            full_path = os.path.join(self.temp_dir, file_path)
            # Write non-ASCII content to exercise UTF-8 handling.
            with open(full_path, "w") as file_handle:
                file_handle.write(u'\u023Eest \u0256ata'.encode('utf8'))

    def test_valid_description(self):

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(self.YAML_DATA)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that the root directory is stored
        self.assertEqual(desc.root_dir(), self.temp_dir)

        # Check that we find the files we expect
        self.assertEqual(desc.suite_name(), self.YAML_DATA['test_suite_name'])
        self.assertEqual(desc.lib_paths(), self.LIB_FILES)
        self.assertEqual(desc.src_paths(), self.SRC_FILES)
        self.assertEqual(desc.spec_paths(), self.SPEC_FILES)
        self.assertEqual(desc.fixture_paths(), self.FIXTURE_FILES)
        self.assertEqual(desc.test_runner(), self.YAML_DATA['test_runner'])
        self.assertEqual(desc.prepend_path(), '')

    def test_different_working_dir(self):

        # Change the working directory temporarily
        # (the superclass will reset it afterwards)
        os.chdir(self.TEMP_DIRS[0])

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(self.YAML_DATA)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that we find the files we expect
        self.assertEqual(desc.lib_paths(), self.LIB_FILES)
        self.assertEqual(desc.src_paths(), self.SRC_FILES)
        self.assertEqual(desc.spec_paths(), self.SPEC_FILES)
        self.assertEqual(desc.fixture_paths(), self.FIXTURE_FILES)
        self.assertEqual(desc.test_runner(), self.YAML_DATA['test_runner'])

    def test_double_dot_paths(self):

        # Transform the paths into relative paths
        rel_path_map = lambda path: os.path.join('..', path)
        yaml_data = copy.deepcopy(self.YAML_DATA)
        for key in ['lib_paths', 'src_paths', 'spec_paths', 'fixture_paths']:
            yaml_data[key] = map(rel_path_map, yaml_data[key])

        # Create a new root directory for the suite
        # temp_dir/suite_root
        # where the files are still in ../lib, ../src, etc.
        suite_root = os.path.join(self.temp_dir, 'suite_root')
        os.mkdir(suite_root)

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Expect an error for using relative paths,
        # even though the files exist
        with self.assertRaises(SuiteDescriptionError):
            SuiteDescription(yaml_file, suite_root)

    def test_no_such_root_dir(self):

        # Try to create a description with a non-existent root directory
        yaml_file = self._yaml_buffer(self.YAML_DATA)
        no_such_dir = os.path.join(self.temp_dir, 'no_such_dir')

        with self.assertRaises(SuiteDescriptionError):
            SuiteDescription(yaml_file, no_such_dir)

    def test_root_dir_is_file(self):

        # Try to create a description with a file (not directory) root
        yaml_file = self._yaml_buffer(self.YAML_DATA)
        file_path = os.path.join(self.temp_dir, self.SRC_FILES[0])

        with self.assertRaises(SuiteDescriptionError):
            SuiteDescription(yaml_file, file_path)

    def test_non_list_data(self):

        # Replace all list values with single values
        yaml_data = copy.deepcopy(self.YAML_DATA)
        yaml_data['lib_paths'] = 'lib'
        yaml_data['src_paths'] = 'src'
        yaml_data['spec_paths'] = 'spec'
        yaml_data['fixture_paths'] = 'fixtures'

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that we get the right paths
        # (exclude files from the directories we left out)
        self.assertEqual(desc.lib_paths(), self.LIB_FILES[0:3])
        self.assertEqual(desc.src_paths(), self.SRC_FILES[0:3])
        self.assertEqual(desc.spec_paths(), self.SPEC_FILES[0:3])

    def test_prepend_path_is_not_string(self):

        # Set prepend_path to non-string values
        for prepend_path in [42, ['list', 'of', 'items'], {'dict': 12}]:
            yaml_data = copy.deepcopy(self.YAML_DATA)
            yaml_data['prepend_path'] = prepend_path
            self._assert_invalid_desc(yaml_data)

    def test_yaml_is_list_not_dict(self):

        # Set up the YAML file to be a list of dicts instead
        # of a dict.
        # (This is easy to do by mistake in the YAML syntax).
        bad_data = [{key: value} for key, value in self.YAML_DATA.iteritems()]
        yaml_file = self._yaml_buffer(bad_data)

        # Expect an exception
        with self.assertRaises(SuiteDescriptionError):
            SuiteDescription(yaml_file, self.temp_dir)

    def test_no_lib_specified(self):

        # 'lib_paths' is an optional key
        yaml_data = copy.deepcopy(self.YAML_DATA)
        del yaml_data['lib_paths']

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that we get an empty list of lib paths
        self.assertEqual(desc.lib_paths(), [])

    def test_no_fixtures_specified(self):

        # 'fixture_paths' is an optional key
        yaml_data = copy.deepcopy(self.YAML_DATA)
        del yaml_data['fixture_paths']

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that we get an empty list of lib paths
        self.assertEqual(desc.fixture_paths(), [])

    def test_non_js_paths(self):

        # Add extra non-JS files
        yaml_data = copy.deepcopy(self.YAML_DATA)
        yaml_data['src_paths'].append('src.txt')
        yaml_data['spec_paths'].append('src.txt')
        yaml_data['lib_paths'].append('src.txt')

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that we ignore those files
        self.assertEqual(desc.lib_paths(), self.LIB_FILES)
        self.assertEqual(desc.src_paths(), self.SRC_FILES)
        self.assertEqual(desc.spec_paths(), self.SPEC_FILES)

    def test_repeated_paths(self):

        # Repeat paths that are already included in the directories
        yaml_data = copy.deepcopy(self.YAML_DATA)
        yaml_data['src_paths'].append(self.SRC_FILES[0])
        yaml_data['spec_paths'].append(self.SPEC_FILES[0])
        yaml_data['lib_paths'].append(self.LIB_FILES[0])
        yaml_data['fixture_paths'].append(self.FIXTURE_FILES[0])

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that we ignore repeats
        self.assertEqual(desc.lib_paths(), self.LIB_FILES)
        self.assertEqual(desc.src_paths(), self.SRC_FILES)
        self.assertEqual(desc.spec_paths(), self.SPEC_FILES)
        self.assertEqual(desc.fixture_paths(), self.FIXTURE_FILES)

    def test_prepend_path(self):

        # Add a path to prepend to source paths in reports
        yaml_data = copy.deepcopy(self.YAML_DATA)
        yaml_data['prepend_path'] = 'base/path'

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that the prepend path is stored
        self.assertEqual(desc.prepend_path(), 'base/path')

    def test_exclude_from_page(self):

        # Add in a rule to exclude files in other_* dir
        yaml_data = copy.deepcopy(self.YAML_DATA)
        yaml_data['exclude_from_page'] = 'other_[^/]*/.*'

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that the root directory is stored
        self.assertEqual(desc.root_dir(), self.temp_dir)

        # Check that we find the files we expect
        expected_lib = self.LIB_FILES[:]
        expected_lib.remove('other_lib/test.js')

        expected_src = self.SRC_FILES[:]
        expected_src.remove('other_src/test.js')

        expected_spec = self.SPEC_FILES[:]
        expected_spec.remove('other_spec/test.js')

        self.assertEqual(desc.lib_paths(only_in_page=True), expected_lib)
        self.assertEqual(desc.src_paths(only_in_page=True), expected_src)
        self.assertEqual(desc.spec_paths(only_in_page=True), expected_spec)

    def test_include_and_exclude_from_page(self):

        # Add in a rule to exclude files in other_* dir
        yaml_data = copy.deepcopy(self.YAML_DATA)
        yaml_data['exclude_from_page'] = 'other_[^/]*/.*'

        # Add an override rule to always include other_*/test.js
        yaml_data['include_in_page'] = 'other_[^/]*/test.js'

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Create the suite description using the YAML file
        desc = SuiteDescription(yaml_file, self.temp_dir)

        # Check that the root directory is stored
        self.assertEqual(desc.root_dir(), self.temp_dir)

        # Check that we still get all the files back
        # (the include rule overrides the exclude rule)
        self.assertEqual(desc.lib_paths(only_in_page=True), self.LIB_FILES)
        self.assertEqual(desc.src_paths(only_in_page=True), self.SRC_FILES)
        self.assertEqual(desc.spec_paths(only_in_page=True), self.SPEC_FILES)

    def test_missing_required_data(self):
        for key in ['test_suite_name', 'src_paths', 'spec_paths', 'test_runner']:

            # Delete the required key from the description
            yaml_data = copy.deepcopy(self.YAML_DATA)
            del yaml_data[key]

            # Print a message to make failures more informative
            print "Missing key '{}' should raise an exception".format(key)

            # Check that we get an exception
            self._assert_invalid_desc(yaml_data)

    def test_empty_required_list(self):
        for key in ['src_paths', 'spec_paths']:

            # Replace the key with an empty list
            yaml_data = copy.deepcopy(self.YAML_DATA)
            yaml_data[key] = []

            # Print a message to make failures more informative
            print "Empty list for '{}' should raise an exception".format(key)

            # Check that we get an exception
            self._assert_invalid_desc(yaml_data)

    def test_invalid_test_runner(self):
        yaml_data = copy.deepcopy(self.YAML_DATA)
        yaml_data['test_runner'] = 'invalid_test_runner'

        # Check that we get an exception
        self._assert_invalid_desc(yaml_data)

    def test_invalid_suite_name(self):
        invalid_names = [
            'with a space',
            'with/slash',
            'with?question',
            'with+plus',
            'with&'
        ]

        # Suite names need to be URL-encodable
        for invalid in invalid_names:
            print invalid
            yaml_data = copy.deepcopy(self.YAML_DATA)
            yaml_data['test_suite_name'] = invalid
            self._assert_invalid_desc(yaml_data)

    def _assert_invalid_desc(self, yaml_data):
        """
        Given `yaml_data` (dict), assert that it raises
        a `SuiteDescriptionError`.
        """

        # Create an in-memory YAML file from the data
        yaml_file = self._yaml_buffer(yaml_data)

        # Expect an exception when we try to parse the YAML file
        with self.assertRaises(SuiteDescriptionError):
            SuiteDescription(yaml_file, self.temp_dir)

    @staticmethod
    def _yaml_buffer(data_dict):
        """
        Create an in-memory buffer with YAML-encoded data
        provided by `data_dict` (a dictionary).

        Returns the buffer (a file-like object).
        """

        # Encode the `data_dict` as YAML and write it to the buffer
        yaml_str = yaml.dump(data_dict)

        # Create a file-like string buffer to hold the YAML data
        string_buffer = StringIO(yaml_str)

        return string_buffer
class SuiteRendererTest(unittest.TestCase):
JASMINE_TEST_RUNNER_SCRIPT = dedent("""
(function() {
var jasmineEnv = jasmine.getEnv();
jasmineEnv.updateInterval = 1000;
var reporter = new jasmine.JsonReporter("js_test_tool_results", "test-suite");
jasmineEnv.addReporter(reporter);
jasmineEnv.specFilter = function(spec) {
return reporter.specFilter(spec);
};
var currentWindowOnload = window.onload;
window.onload = function() {
if (currentWindowOnload) {
currentWindowOnload();
}
execJasmine();
};
function execJasmine() {
try {
jasmineEnv.execute();
}
catch(err) {
window.js_test_tool.reportError(err);
}
}
if (!window.js_test_tool) {
window.js_test_tool = {};
window.js_test_tool.reportError = function(err) {
var resultDiv = document.getElementById("js_test_tool_results");
var errDiv = document.getElementById("js_test_tool_error");
// If an error <div> is defined (e.g. not in dev mode)
// then write the error to that <div>
// so the Browser can report it
if (errDiv) {
errDiv.innerHTML = err.toString()
if ('stack' in err) {
errDiv.innerHTML += "\\n" + err.stack
}
// Signal to the browser that we're done
// to avoid blocking until timeout
resultsDiv.className = "done";
}
// Re-throw the error (e.g. for dev mode)
else {
throw err;
}
}
}
})();
""").strip()
JASMINE_LOAD_FIXTURES_SCRIPT = dedent("""
// Load fixtures if using jasmine-jquery
if (jasmine.getFixtures) {
jasmine.getFixtures().fixturesPath = "/suite/test-suite/include/";
}
""").strip()
ALERT_STUB_SCRIPT = dedent("""
// Stub out modal dialog alerts, which will prevent
// us from accessing the test results in the DOM
window.confirm = function(){return true;};
window.alert = function(){return;};
""").strip()
def setUp(self):
# Create the renderer we will use
self.renderer = SuiteRenderer()
def test_unicode(self):
# Create a mock test suite description
desc = self._mock_desc(['lib1.js', 'lib2.js'],
['src1.js', 'src2.js'],
['spec1.js', 'spec2.js'],
'jasmine')
# Render the description as HTML
html = self.renderer.render_to_string('test-suite', desc)
# Expect that we get a `unicode` string
self.assertTrue(isinstance(html, unicode))
def test_jasmine_runner_includes(self):
jasmine_libs = ['jasmine/jasmine.js',
'jasmine/jasmine-json.js']
lib_paths = ['lib1.js', 'lib2.js']
src_paths = ['src1.js', 'src2.js']
spec_paths = ['spec1.js', 'spec2.js']
# Create a mock test suite description
desc = self._mock_desc(lib_paths, src_paths, spec_paths, 'jasmine')
# Check that we get the right script includes
suite_includes = lib_paths + src_paths + spec_paths
self._assert_js_includes(jasmine_libs, suite_includes, desc)
# Check that only "include_in_page" scripts were used
desc.lib_paths.assert_called_with(only_in_page=True)
desc.src_paths.assert_called_with(only_in_page=True)
desc.spec_paths.assert_called_with(only_in_page=True)
def test_no_lib_files(self):
jasmine_libs = ['jasmine/jasmine.js',
'jasmine/jasmine-json.js']
src_paths = ['src.js']
spec_paths = ['spec.js']
# Create a mock test suite description
desc = self._mock_desc([], src_paths, spec_paths, 'jasmine')
# Check that we get the right script includes
suite_includes = src_paths + spec_paths
self._assert_js_includes(jasmine_libs, suite_includes, desc)
def test_render_jasmine_runner(self):
# Create a test runner page
tree = self._test_runner_html()
# Expect that a <div> exists with the correct ID for the results
div_id = SuiteRenderer.RESULTS_DIV_ID
elems = tree.xpath('//div[@id="{}"]'.format(div_id))
self.assertEqual(len(elems), 1)
# Expect that a <div> exists for reporting JS errors
div_id = SuiteRenderer.ERROR_DIV_ID
elems = tree.xpath('//div[@id="{}"]'.format(div_id))
self.assertEqual(len(elems), 1)
# Expect that the right scripts are available
self._assert_script(tree, self.JASMINE_TEST_RUNNER_SCRIPT, -1)
self._assert_script(tree, self.JASMINE_LOAD_FIXTURES_SCRIPT, -2)
def test_render_jasmine_dev_mode(self):
# Create a test runner page in dev mode
tree = self._test_runner_html(dev_mode=True)
# Should get the same script, except with an HTML reporter
# instead of the custom JSON reporter
expected_script = self.JASMINE_TEST_RUNNER_SCRIPT.replace(
'JsonReporter("js_test_tool_results", "test-suite")',
'HtmlReporter()')
# Check that we have the right script available
self._assert_script(tree, expected_script, -1)
def test_jasmine_dev_mode_includes(self):
# Configure the renderer to use dev mode
self.renderer = SuiteRenderer(dev_mode=True)
# Include the HTMLReporter instead of the JSON reporter
jasmine_libs = ['jasmine/jasmine.js',
'jasmine/jasmine-html.js']
lib_paths = ['lib1.js', 'lib2.js']
src_paths = ['src1.js', 'src2.js']
spec_paths = ['spec1.js', 'spec2.js']
# Create a mock test suite description
desc = self._mock_desc(lib_paths, src_paths, spec_paths, 'jasmine')
# Check that we get the right script includes
suite_includes = lib_paths + src_paths + spec_paths
self._assert_js_includes(jasmine_libs, suite_includes, desc)
def test_stub_alerts(self):
tree = self._test_runner_html()
self._assert_script(tree, self.ALERT_STUB_SCRIPT, 0)
def test_stub_alerts_dev_mode(self):
tree = self._test_runner_html(dev_mode=True)
self._assert_script(tree, self.ALERT_STUB_SCRIPT, 0)
def test_undefined_template(self):
# Create a mock test suite description with an invalid test runner
desc = self._mock_desc([], [], [], 'invalid_test_runner')
# Should get an exception that the template could not be found
with self.assertRaises(SuiteRendererError):
self.renderer.render_to_string('test-suite', desc)
def test_template_render_error(self):
# Create a mock test suite description with no includes
desc = self._mock_desc([], [], [], 'jasmine')
# Patch Jinja2's `render()` function
with mock.patch.object(SuiteRenderer, 'render_template') as render_func:
# Have the render function raise an exception
render_func.side_effect = ValueError()
# Expect that we get a `SuiteRendererError`
with self.assertRaises(SuiteRendererError):
self.renderer.render_to_string('test-suite', desc)
def _test_runner_html(self, dev_mode=False):
"""
Return a parsed tree of the test runner page HTML.
"""
# Configure the renderer to use dev mode
self.renderer = SuiteRenderer(dev_mode=dev_mode)
# Create a mock test suite description
desc = self._mock_desc([], [], [], 'jasmine')
# Render the description to HTML, enabling dev mode
html = self.renderer.render_to_string('test-suite', desc)
# Parse the HTML
return etree.HTML(html)
def _assert_script(self, html_tree, expected_script, script_index):
    """
    Assert that `html_tree` contains a <script> tag at position
    `script_index` (0-based) whose stripped text equals `expected_script`.
    """
    scripts = html_tree.xpath('/html/head/script')

    # The requested index must be addressable in the list of scripts.
    self.assertTrue(len(scripts) > abs(script_index))

    found_script = scripts[script_index].text.strip()
    self.assertEqual(found_script, expected_script)
def _assert_js_includes(self, runner_includes, suite_includes, suite_desc):
    """
    Render `suite_desc` to HTML and assert its <script src="..."> tags are
    exactly `runner_includes` (served under `/runner/`) followed by
    `suite_includes` (served under `/suite/test-suite/include/`), in order.
    """
    rendered = self.renderer.render_to_string('test-suite', suite_desc)
    html_tree = etree.HTML(rendered)

    script_elems = html_tree.xpath('/html/head/script')

    # Prefix each include with the URL root it is served from.
    expected = [os.path.join('/runner', path)
                for path in runner_includes]
    expected += [os.path.join('/suite', 'test-suite', 'include', path)
                 for path in suite_includes]

    # Inline <script> tags (no src attribute) are ignored.
    actual = [elem.get('src') for elem in script_elems
              if elem.get('src') is not None]

    self.assertEqual(actual, expected)
@staticmethod
def _mock_desc(lib_paths, src_paths, spec_paths, test_runner):
    """
    Build a mock `SuiteDescription` whose accessor methods return
    `lib_paths` (JS dependency files), `src_paths` (JS sources),
    `spec_paths` (JS specs) and `test_runner` (e.g. 'jasmine').
    """
    suite = mock.MagicMock(SuiteDescription)

    suite.lib_paths.return_value = lib_paths
    suite.src_paths.return_value = src_paths
    suite.spec_paths.return_value = spec_paths
    suite.test_runner.return_value = test_runner

    return suite
| apache-2.0 |
elpaso/QGIS | tests/src/python/test_qgsxmlutils.py | 3 | 6048 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsXmlUtils.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '18/11/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA switch sip api
from qgis.core import (QgsXmlUtils,
QgsProperty,
QgsGeometry,
QgsCoordinateReferenceSystem)
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtGui import QColor
from qgis.testing import start_app, unittest
# Spin up the QGIS application environment once for this test module.
start_app()
class TestQgsXmlUtils(unittest.TestCase):
    """Round-trip tests for QgsXmlUtils.writeVariant/readVariant."""

    def test_invalid(self):
        """
        Test that a None (invalid) value round-trips as None
        """
        doc = QDomDocument("properties")

        elem = QgsXmlUtils.writeVariant(None, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertIsNone(prop2)

    def test_integer(self):
        """
        Test that maps of integers are correctly loaded and written
        """
        doc = QDomDocument("properties")

        my_properties = {'a': 1, 'b': 2, 'c': 3, 'd': -1}
        elem = QgsXmlUtils.writeVariant(my_properties, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(my_properties, prop2)

    def test_long(self):
        """
        Test that very large integers are correctly loaded and written
        """
        doc = QDomDocument("properties")

        # not sure if this actually does map to a long?
        my_properties = {'a': 9223372036854775808}
        elem = QgsXmlUtils.writeVariant(my_properties, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(my_properties, prop2)

    def test_string(self):
        """
        Test that strings are correctly loaded and written
        """
        doc = QDomDocument("properties")

        my_properties = {'a': 'a', 'b': 'b', 'c': 'something_else', 'empty': ''}
        elem = QgsXmlUtils.writeVariant(my_properties, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(my_properties, prop2)

    def test_double(self):
        """
        Test that maps of floats (and mixed ints) are correctly loaded and written
        """
        doc = QDomDocument("properties")

        my_properties = {'a': 0.27, 'b': 1.0, 'c': 5}
        elem = QgsXmlUtils.writeVariant(my_properties, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(my_properties, prop2)

    def test_boolean(self):
        """
        Test that maps of booleans are correctly loaded and written
        """
        doc = QDomDocument("properties")

        my_properties = {'a': True, 'b': False}
        elem = QgsXmlUtils.writeVariant(my_properties, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(my_properties, prop2)

    def test_list(self):
        """
        Test that lists are correctly loaded and written
        """
        doc = QDomDocument("properties")
        my_properties = [1, 4, 'a', 'test', 7.9]
        elem = QgsXmlUtils.writeVariant(my_properties, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(my_properties, prop2)

    def test_complex(self):
        """
        Test that nested maps (mixed value types) are correctly loaded and written
        """
        doc = QDomDocument("properties")

        my_properties = {'boolean': True, 'integer': False, 'map': {'a': 1}}
        elem = QgsXmlUtils.writeVariant(my_properties, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(my_properties, prop2)

    def test_property(self):
        """
        Test that QgsProperty values are correctly loaded and written
        """
        doc = QDomDocument("properties")

        # static-value property
        prop = QgsProperty.fromValue(1001)
        elem = QgsXmlUtils.writeVariant(prop, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(prop, prop2)

        # expression-based property
        prop = QgsProperty.fromExpression('1+2=5')
        elem = QgsXmlUtils.writeVariant(prop, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(prop, prop2)

        # field-based property
        prop = QgsProperty.fromField('oid')
        elem = QgsXmlUtils.writeVariant(prop, doc)

        prop2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(prop, prop2)

    def test_crs(self):
        """
        Test that QgsCoordinateReferenceSystem values are correctly loaded and written
        """
        doc = QDomDocument("properties")

        # a valid CRS should survive the round trip with its authid intact
        crs = QgsCoordinateReferenceSystem('epsg:3111')
        elem = QgsXmlUtils.writeVariant(crs, doc)

        crs2 = QgsXmlUtils.readVariant(elem)
        self.assertTrue(crs2.isValid())
        self.assertEqual(crs2.authid(), 'EPSG:3111')

        # an invalid (default-constructed) CRS should stay invalid
        crs = QgsCoordinateReferenceSystem()
        elem = QgsXmlUtils.writeVariant(crs, doc)

        crs2 = QgsXmlUtils.readVariant(elem)
        self.assertFalse(crs2.isValid())

    def test_geom(self):
        """
        Test that QgsGeometry values are correctly loaded and written
        """
        doc = QDomDocument("properties")

        g = QgsGeometry.fromWkt('Point(3 4)')
        elem = QgsXmlUtils.writeVariant(g, doc)

        g2 = QgsXmlUtils.readVariant(elem)
        self.assertEqual(g2.asWkt(), 'Point (3 4)')

    def test_color(self):
        """
        Test that QColor values are correctly loaded and written
        """
        doc = QDomDocument("properties")

        # opaque color
        elem = QgsXmlUtils.writeVariant(QColor(100, 200, 210), doc)

        c = QgsXmlUtils.readVariant(elem)
        self.assertEqual(c, QColor(100, 200, 210))

        # color with alpha channel
        elem = QgsXmlUtils.writeVariant(QColor(100, 200, 210, 50), doc)

        c = QgsXmlUtils.readVariant(elem)
        self.assertEqual(c, QColor(100, 200, 210, 50))

        # invalid (default-constructed) color should stay invalid
        elem = QgsXmlUtils.writeVariant(QColor(), doc)

        c = QgsXmlUtils.readVariant(elem)
        self.assertFalse(c.isValid())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v3-8192/lingvo/tasks/mt/params/wmt14_en_de.py | 3 | 6670 | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train NMT Models on WMT'14 English-German machine translation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import model_registry
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_model_params
from lingvo.tasks.mt import base_config
from lingvo.tasks.mt import input_generator
from lingvo.tasks.mt import model
@model_registry.RegisterSingleTaskModel
class WmtEnDeTransformerBase(base_model_params.SingleTaskModelParams):
  """Params for WMT'14 En->De."""

  # Directory holding the word-piece tokenized TFRecords and the vocab file.
  DATADIR = '/usr/local/google/wmt14/wpm/'
  # Shared source/target word-piece vocabulary size.
  VOCAB_SIZE = 32000

  def _CommonInputParams(self, is_eval):
    """Input generator params for WMT'14 En->De."""
    p = input_generator.NmtInput.Params()
    p.tokenizer.vocab_size = self.VOCAB_SIZE

    if is_eval:
      # Fixed seed and serial reads make eval input deterministic.
      p.file_random_seed = 27182818
      p.file_parallelism = 1
      p.file_buffer_size = 1
      # Sentence-length buckets; the two longest buckets get smaller batches.
      p.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98, 137, 200]
      p.bucket_batch_limit = [16] * 8 + [4] * 2
    else:
      # Seed 0: non-deterministic shuffling for training.
      p.file_random_seed = 0
      p.file_parallelism = 16
      p.file_buffer_size = 10000000
      p.bucket_upper_bound = ([8, 10, 12, 14, 16, 20, 24, 28] +
                              [32, 40, 48, 56, 64, 80, 96])
      # Per-bucket batch sizes chosen so tokens-per-batch stays roughly
      # constant (~4096) across buckets.
      p.bucket_batch_limit = ([512, 409, 341, 292, 256, 204, 170, 146] +
                              [128, 102, 85, 73, 64, 51, 42])

    return p

  def Train(self):
    """Training input: sharded train TFRecords."""
    p = self._CommonInputParams(is_eval=False)
    p.file_pattern = 'tfrecord:' + os.path.join(self.DATADIR,
                                                'train.tfrecords-*')
    p.tokenizer.token_vocab_filepath = os.path.join(self.DATADIR,
                                                    'wpm-ende.voc')
    # Number of sentence pairs in the training set.
    p.num_samples = 4492447
    return p

  def Dev(self):
    """Dev-set input (3000 sentence pairs)."""
    p = self._CommonInputParams(is_eval=True)
    p.file_pattern = 'tfrecord:' + os.path.join(self.DATADIR, 'dev.tfrecords')
    p.tokenizer.token_vocab_filepath = os.path.join(self.DATADIR,
                                                    'wpm-ende.voc')
    p.num_samples = 3000
    return p

  def Test(self):
    """Test-set input (2737 sentence pairs)."""
    p = self._CommonInputParams(is_eval=True)
    p.file_pattern = 'tfrecord:' + os.path.join(self.DATADIR, 'test.tfrecords')
    p.tokenizer.token_vocab_filepath = os.path.join(self.DATADIR,
                                                    'wpm-ende.voc')
    p.num_samples = 2737
    return p

  def Task(self):
    """Model params: the 'base' Transformer configuration."""
    # model_dim=512 / hidden=2048 / 8 heads / 6 layers matches the
    # standard Transformer-base setup.
    p = base_config.SetupTransformerParams(
        model.TransformerModel.Params(),
        name='wmt14_en_de_transformer_base',
        vocab_size=self.VOCAB_SIZE,
        model_dim=512,
        hidden_dim=2048,
        num_heads=8,
        num_layers=6,
        residual_dropout_prob=0.1,
        input_dropout_prob=0.1,
        learning_rate=3.0,
        warmup_steps=40000)
    p.eval.samples_per_summary = 7500
    return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeTransformerSmall(WmtEnDeTransformerBase):
  """Small Transformer Params for WMT'14 En->De."""

  def Task(self):
    """Model params: a scaled-down Transformer for quick runs/tests."""
    # Much smaller dims/heads/layers than the base config; other
    # hyper-parameters are kept identical to the parent class.
    p = base_config.SetupTransformerParams(
        model.TransformerModel.Params(),
        name='wmt14_en_de_transformer_small',
        vocab_size=self.VOCAB_SIZE,
        model_dim=64,
        hidden_dim=128,
        num_heads=2,
        num_layers=2,
        residual_dropout_prob=0.1,
        input_dropout_prob=0.1,
        learning_rate=3.0,
        warmup_steps=40000)
    p.eval.samples_per_summary = 7500
    return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeTransformerSmallCloudTpu(WmtEnDeTransformerSmall):
  """Small Transformer Params for WMT'14 En->De on TPU."""

  def _CommonInputParams(self, is_eval):
    p = super(WmtEnDeTransformerSmallCloudTpu, self)._CommonInputParams(is_eval)
    # Pad every example to the largest bucket bound and use one batch size
    # for all buckets, so batches have static shapes (needed on TPU).
    p.pad_to_max_seq_length = True
    p.source_max_length = p.bucket_upper_bound[-1]
    p.bucket_batch_limit = [64] * len(p.bucket_upper_bound)
    return p

  def Task(self):
    p = super(WmtEnDeTransformerSmallCloudTpu, self).Task()
    # Keep each embedding table in a single shard.
    p.decoder.token_emb.max_num_shards = 1
    p.encoder.token_emb.max_num_shards = 1
    return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeRNMT(WmtEnDeTransformerBase):
  """Params for WMT'14 En->De in sync training."""

  def _CommonInputParams(self, is_eval):
    p = super(WmtEnDeRNMT, self)._CommonInputParams(is_eval)
    # RNMT uses fewer, coarser buckets than the Transformer configs.
    if is_eval:
      p.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98, 200]
      p.bucket_batch_limit = [128] * 8 + [32]
    else:
      p.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98]
      p.bucket_batch_limit = [128] * 7 + [64]
    return p

  def Task(self):
    """Model params for an RNMT+ style recurrent NMT model."""
    p = base_config.SetupRNMTParams(
        model.RNMTModel.Params(),
        name='wmt14_en_de_rnmtplus_base',
        vocab_size=self.VOCAB_SIZE,
        embedding_dim=1024,
        hidden_dim=1024,
        num_heads=4,
        num_encoder_layers=6,
        num_decoder_layers=8,
        learning_rate=1e-4,
        l2_regularizer_weight=1e-5,
        lr_warmup_steps=500,
        lr_decay_start=400000,
        lr_decay_end=1200000,
        lr_min=0.5,
        ls_uncertainty=0.1,
        atten_dropout_prob=0.3,
        residual_dropout_prob=0.3,
        adam_beta2=0.98,
        adam_epsilon=1e-6,
    )
    p.eval.samples_per_summary = 7500
    return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeRNMTCloudTpu(WmtEnDeRNMT):
  """Params for WMT'14 En->De in sync training on TPU."""

  def _CommonInputParams(self, is_eval):
    p = super(WmtEnDeRNMTCloudTpu, self)._CommonInputParams(is_eval)
    # Pad to the largest bucket bound and flatten batch sizes so batches
    # have static shapes (needed on TPU).
    p.pad_to_max_seq_length = True
    p.source_max_length = p.bucket_upper_bound[-1]
    p.bucket_batch_limit = [16] * len(p.bucket_upper_bound)
    return p

  def Task(self):
    p = super(WmtEnDeRNMTCloudTpu, self).Task()
    # Keep each embedding table in a single shard.
    p.encoder.emb.max_num_shards = 1
    p.decoder.emb.max_num_shards = 1
    return p
| apache-2.0 |
NickPresta/sentry | src/sentry/migrations/0020_auto__add_projectdomain__add_unique_projectdomain_project_domain.py | 6 | 12686 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, transaction
class Migration(SchemaMigration):
    """South schema migration: add the ProjectDomain model and its
    (project, domain) unique constraint."""

    def forwards(self, orm):
        """Apply the migration: create the table, then the unique index."""

        # Adding model 'ProjectDomain'
        db.create_table('sentry_projectdomain', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='domain_set', to=orm['sentry.Project'])),
            ('domain', self.gf('django.db.models.fields.CharField')(max_length=128)),
        ))
        db.send_create_signal('sentry', ['ProjectDomain'])

        # Adding unique constraint on 'ProjectDomain', fields ['project', 'domain']
        db.create_unique('sentry_projectdomain', ['project_id', 'domain'])

    def backwards(self, orm):
        """Revert the migration: drop the constraint first, then the table."""

        # Removing unique constraint on 'ProjectDomain', fields ['project', 'domain']
        db.delete_unique('sentry_projectdomain', ['project_id', 'domain'])

        # Deleting model 'ProjectDomain'
        db.delete_table('sentry_projectdomain')

    # Frozen ORM snapshot used by South to build `orm` above.
    # Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sentry.filtervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.groupedmessage': {
            'Meta': {'unique_together': "(('project', 'logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'sentry.message': {
            'Meta': {'object_name': 'Message'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'message_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'sentry.messagecountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.messagefiltervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.messageindex': {
            'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
            'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'sentry.project': {
            'Meta': {'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'sentry.projectdomain': {
            'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
        },
        'sentry.projectmember': {
            'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['sentry']
| bsd-3-clause |
zstackorg/zstack-woodpecker | integrationtest/vm/virtualrouter/vr/test_vr_ha.py | 2 | 1258 | '''
1. Create 1 Test VMs with VR.
2. After 1 VM created, Check VR Appliance VM ha status.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.ha_operations as ha_ops
# Test-case options consumed by the zstack-woodpecker runner:
#   timeout    - abort the case after 600 seconds
#   noparallel - False, so this case may run in parallel with other cases
_config_ = {
    'timeout' : 600,
    'noparallel' : False
}

# Shared helper module provided by the woodpecker test library.
test_stub = test_lib.lib_get_test_stub()
# Tracks every resource the test creates so error_cleanup() can tear it down.
test_obj_dict = test_state.TestStateDict()
def test():
    """Create a VR-backed VM and verify its virtual routers use HA 'NeverStop'."""
    test_util.test_dsc('Create test vm1 and check')

    # This check is only meaningful when VM HA is enabled in the environment.
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm1 = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm1)
    vm1.check()

    # Every 'vrouter' appliance VM backing vm1 must be set to NeverStop.
    for vr in test_lib.lib_find_vr_by_vm(vm1.vm):
        if vr.applianceVmType != "vrouter":
            continue
        if ha_ops.get_vm_instance_ha_level(vr.uuid) != "NeverStop":
            test_util.test_fail('vr: %s is not set to HA mode NeverStop.' % vr.uuid)

    vm1.destroy()
    test_util.test_pass('Check VR HA mode Success')
# Will be called only if an exception happens in test().
def error_cleanup():
    # Tear down every resource registered in test_obj_dict.
    test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
marzique/cs50_finance | sql.py | 1 | 6312 | import datetime
import decimal
import importlib
import logging
import re
import sqlalchemy
import sqlparse
import sys
import warnings
class SQL(object):
    """Wrap SQLAlchemy to provide a simple SQL API."""

    def __init__(self, url, **kwargs):
        """
        Create instance of sqlalchemy.engine.Engine.

        URL should be a string that indicates database dialect and connection arguments.

        http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine
        http://docs.sqlalchemy.org/en/latest/dialects/index.html
        """

        # log statements to standard error
        logging.basicConfig(level=logging.DEBUG)
        self.logger = logging.getLogger(__name__)

        # create engine, raising exception if back end's module not installed
        self.engine = sqlalchemy.create_engine(url, **kwargs)

    def execute(self, text, **params):
        """
        Execute a SQL statement.

        `text` is a single SQL statement with named placeholders (:name);
        `params` supplies the values to bind. Return value depends on the
        statement kind: rows for SELECT, last row id for INSERT, row count
        for DELETE/UPDATE, True otherwise, and None on integrity errors.
        """
        class UserDefinedType(sqlalchemy.TypeDecorator):
            """
            Add support for expandable values, a la https://bitbucket.org/zzzeek/sqlalchemy/issues/3953/expanding-parameter.
            """

            impl = sqlalchemy.types.UserDefinedType

            def process_literal_param(self, value, dialect):
                """Receive a literal parameter value to be rendered inline within a statement."""
                def process(value):
                    """Render a literal value, escaping as needed."""

                    # bool
                    if isinstance(value, bool):
                        return sqlalchemy.types.Boolean().literal_processor(dialect)(value)

                    # datetime.date
                    elif isinstance(value, datetime.date):
                        return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%Y-%m-%d"))

                    # datetime.datetime
                    elif isinstance(value, datetime.datetime):
                        return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%Y-%m-%d %H:%M:%S"))

                    # datetime.time
                    elif isinstance(value, datetime.time):
                        return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%H:%M:%S"))

                    # float
                    elif isinstance(value, float):
                        return sqlalchemy.types.Float().literal_processor(dialect)(value)

                    # int
                    elif isinstance(value, int):
                        return sqlalchemy.types.Integer().literal_processor(dialect)(value)

                    # # long
                    # elif sys.version_info.major != 3 and isinstance(value, long):
                    #     return sqlalchemy.types.Integer().literal_processor(dialect)(value)

                    # str
                    elif isinstance(value, str):
                        return sqlalchemy.types.String().literal_processor(dialect)(value)

                    # None
                    elif isinstance(value, sqlalchemy.sql.elements.Null):
                        return sqlalchemy.types.NullType().literal_processor(dialect)(value)

                    # unsupported value
                    raise RuntimeError("unsupported value")

                # process value(s), separating with commas as needed
                # (a list expands to a comma-separated sequence, e.g. for IN clauses)
                if type(value) is list:
                    return ", ".join([process(v) for v in value])
                else:
                    return process(value)

        # allow only one statement at a time
        if len(sqlparse.split(text)) > 1:
            raise RuntimeError("too many statements at once")

        # raise exceptions for warnings
        # NOTE: this mutates global warning filters for the whole process
        warnings.filterwarnings("error")

        # prepare, execute statement
        try:

            # construct a new TextClause clause
            statement = sqlalchemy.text(text)

            # iterate over parameters
            for key, value in params.items():

                # translate None to NULL
                if value is None:
                    value = sqlalchemy.sql.null()

                # bind parameters before statement reaches database, so that bound parameters appear in exceptions
                # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text
                statement = statement.bindparams(sqlalchemy.bindparam(key, value=value, type_=UserDefinedType()))

            # stringify bound parameters
            # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html#how-do-i-render-sql-expressions-as-strings-possibly-with-bound-parameters-inlined
            statement = str(statement.compile(compile_kwargs={"literal_binds": True}))

            # execute statement
            result = self.engine.execute(statement)

            # log statement
            self.logger.debug(statement)

            # if SELECT (or INSERT with RETURNING), return result set as list of dict objects
            if re.search(r"^\s*SELECT", statement, re.I):

                # coerce any decimal.Decimal objects to float objects
                # https://groups.google.com/d/msg/sqlalchemy/0qXMYJvq8SA/oqtvMD9Uw-kJ
                rows = [dict(row) for row in result.fetchall()]
                for row in rows:
                    for column in row:
                        if isinstance(row[column], decimal.Decimal):
                            row[column] = float(row[column])
                return rows

            # if INSERT, return primary key value for a newly inserted row
            elif re.search(r"^\s*INSERT", statement, re.I):
                # PostgreSQL has no cursor.lastrowid; use LASTVAL() instead
                if self.engine.url.get_backend_name() in ["postgres", "postgresql"]:
                    result = self.engine.execute(sqlalchemy.text("SELECT LASTVAL()"))
                    return result.first()[0]
                else:
                    return result.lastrowid

            # if DELETE or UPDATE, return number of rows matched
            elif re.search(r"^\s*(?:DELETE|UPDATE)", statement, re.I):
                return result.rowcount

            # if some other statement, return True unless exception
            return True

        # if constraint violated, return None
        except sqlalchemy.exc.IntegrityError:
            return None
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.6.0/Lib/test/test_importlib/frozen/test_finder.py | 17 | 2105 | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import unittest
class FindSpecTests(abc.FinderTests):

    """Exercise find_spec() for frozen modules."""

    def find(self, name, path=None):
        # Delegate to the frozen importer of the implementation under test.
        return self.machinery.FrozenImporter.find_spec(name, path)

    def test_module(self):
        spec = self.find('__hello__')
        self.assertEqual(spec.origin, 'frozen')

    def test_package(self):
        self.assertIsNotNone(self.find('__phello__'))

    def test_module_in_package(self):
        self.assertIsNotNone(self.find('__phello__.spam', ['__phello__']))

    # No frozen package within another package to test with.
    test_package_in_package = None

    # No easy way to test.
    test_package_over_module = None

    def test_failure(self):
        self.assertIsNone(self.find('<not real>'))
# Generate concrete TestCase classes bound to the frozen and source
# implementations of importlib.machinery.
(Frozen_FindSpecTests,
 Source_FindSpecTests
 ) = util.test_both(FindSpecTests, machinery=machinery)
class FinderTests(abc.FinderTests):

    """Exercise the legacy find_module() API for frozen modules."""

    def find(self, name, path=None):
        # Delegate to the frozen importer of the implementation under test.
        return self.machinery.FrozenImporter.find_module(name, path)

    def test_module(self):
        loader = self.find('__hello__')
        self.assertTrue(hasattr(loader, 'load_module'))

    def test_package(self):
        self.assertTrue(hasattr(self.find('__phello__'), 'load_module'))

    def test_module_in_package(self):
        loader = self.find('__phello__.spam', ['__phello__'])
        self.assertTrue(hasattr(loader, 'load_module'))

    # No frozen package within another package to test with.
    test_package_in_package = None

    # No easy way to test.
    test_package_over_module = None

    def test_failure(self):
        self.assertIsNone(self.find('<not real>'))
# As above: generate frozen- and source-machinery variants of FinderTests.
(Frozen_FinderTests,
 Source_FinderTests
 ) = util.test_both(FinderTests, machinery=machinery)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| mit |
MediaMath/Diamond | src/collectors/beanstalkd/test/testbeanstalkd.py | 2 | 6503 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
from diamond.collector import Collector
from beanstalkd import BeanstalkdCollector
################################################################################
def run_only_if_beanstalkc_is_available(func):
    """Decorator: run *func* only when the beanstalkc client library is
    importable; otherwise the run_only() helper skips the test.
    """
    try:
        import beanstalkc
        beanstalkc  # workaround for pyflakes issue #13
    except ImportError:
        beanstalkc = None
    # PEP 8 (E731): define a named predicate instead of assigning a
    # lambda to a variable; behavior is identical (closes over the
    # local `beanstalkc` binding above).
    def beanstalkc_is_available():
        return beanstalkc is not None
    return run_only(func, beanstalkc_is_available)
class TestBeanstalkdCollector(CollectorTestCase):

    """Unit tests for BeanstalkdCollector.

    No live beanstalkd server is contacted: ``_get_stats`` is patched to
    return a canned statistics structure, and the test verifies the
    metrics the collector publishes from it.
    """

    def setUp(self):
        # Collector configured for a default local beanstalkd instance.
        config = get_collector_config('BeanstalkdCollector', {
            'host': 'localhost',
            'port': 11300,
        })

        self.collector = BeanstalkdCollector(config, None)

    def test_import(self):
        # Sanity check: the collector class imported successfully.
        self.assertTrue(BeanstalkdCollector)

    @run_only_if_beanstalkc_is_available
    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        """One metric per instance stat; per-tube stats are published
        under a ``tubes.<name>.`` prefix (the tube's ``name`` key itself
        is not a metric)."""
        stats = {
            'instance': {
                'current-connections': 10,
                'max-job-size': 65535,
                'cmd-release': 0,
                'cmd-reserve': 4386,
                'pid': 23703,
                'cmd-bury': 0,
                'current-producers': 0,
                'total-jobs': 4331,
                'current-jobs-ready': 0,
                'cmd-peek-buried': 0,
                'current-tubes': 7,
                'current-jobs-delayed': 0,
                'uptime': 182954,
                'cmd-watch': 55,
                'job-timeouts': 0,
                'cmd-stats': 1,
                'rusage-stime': 295.970497,
                'current-jobs-reserved': 0,
                'current-jobs-buried': 0,
                'cmd-reserve-with-timeout': 0,
                'cmd-put': 4331,
                'cmd-pause-tube': 0,
                'cmd-list-tubes-watched': 0,
                'cmd-list-tubes': 0,
                'current-workers': 9,
                'cmd-list-tube-used': 0,
                'cmd-ignore': 0,
                'binlog-records-migrated': 0,
                'current-waiting': 9,
                'cmd-peek': 0,
                'cmd-peek-ready': 0,
                'cmd-peek-delayed': 0,
                'cmd-touch': 0,
                'binlog-oldest-index': 0,
                'binlog-current-index': 0,
                'cmd-use': 4321,
                'total-connections': 4387,
                'cmd-delete': 4331,
                'binlog-max-size': 10485760,
                'cmd-stats-job': 0,
                'rusage-utime': 125.92787,
                'cmd-stats-tube': 0,
                'binlog-records-written': 0,
                'cmd-kick': 0,
                'current-jobs-urgent': 0,
            },
            'tubes': [
                {
                    'current-jobs-delayed': 0,
                    'pause': 0,
                    'name': 'default',
                    'cmd-pause-tube': 0,
                    'current-jobs-buried': 0,
                    'cmd-delete': 10,
                    'pause-time-left': 0,
                    'current-waiting': 9,
                    'current-jobs-ready': 0,
                    'total-jobs': 10,
                    'current-watching': 10,
                    'current-jobs-reserved': 0,
                    'current-using': 10,
                    'current-jobs-urgent': 0,
                }
            ]
        }

        # Use patch.object as a context manager rather than paired
        # start()/stop() calls: if collect() raises, the patch is still
        # removed instead of leaking into subsequent tests.
        with patch.object(BeanstalkdCollector,
                          '_get_stats',
                          Mock(return_value=stats)):
            self.collector.collect()

        metrics = {
            'current-connections': 10,
            'max-job-size': 65535,
            'cmd-release': 0,
            'cmd-reserve': 4386,
            'pid': 23703,
            'cmd-bury': 0,
            'current-producers': 0,
            'total-jobs': 4331,
            'current-jobs-ready': 0,
            'cmd-peek-buried': 0,
            'current-tubes': 7,
            'current-jobs-delayed': 0,
            'uptime': 182954,
            'cmd-watch': 55,
            'job-timeouts': 0,
            'cmd-stats': 1,
            'rusage-stime': 295.970497,
            'current-jobs-reserved': 0,
            'current-jobs-buried': 0,
            'cmd-reserve-with-timeout': 0,
            'cmd-put': 4331,
            'cmd-pause-tube': 0,
            'cmd-list-tubes-watched': 0,
            'cmd-list-tubes': 0,
            'current-workers': 9,
            'cmd-list-tube-used': 0,
            'cmd-ignore': 0,
            'binlog-records-migrated': 0,
            'current-waiting': 9,
            'cmd-peek': 0,
            'cmd-peek-ready': 0,
            'cmd-peek-delayed': 0,
            'cmd-touch': 0,
            'binlog-oldest-index': 0,
            'binlog-current-index': 0,
            'cmd-use': 4321,
            'total-connections': 4387,
            'cmd-delete': 4331,
            'binlog-max-size': 10485760,
            'cmd-stats-job': 0,
            'rusage-utime': 125.92787,
            'cmd-stats-tube': 0,
            'binlog-records-written': 0,
            'cmd-kick': 0,
            'current-jobs-urgent': 0,
            'tubes.default.current-jobs-delayed': 0,
            'tubes.default.pause': 0,
            'tubes.default.cmd-pause-tube': 0,
            'tubes.default.current-jobs-buried': 0,
            'tubes.default.cmd-delete': 10,
            'tubes.default.pause-time-left': 0,
            'tubes.default.current-waiting': 9,
            'tubes.default.current-jobs-ready': 0,
            'tubes.default.total-jobs': 10,
            'tubes.default.current-watching': 10,
            'tubes.default.current-jobs-reserved': 0,
            'tubes.default.current-using': 10,
            'tubes.default.current-jobs-urgent': 0,
        }

        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
################################################################################
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| mit |
jtoppins/beaker | SchemaUpgrades/upgrade_0.6.8_system_status_duration.py | 2 | 2652 | #!/usr/bin/python
import datetime
from sqlalchemy import and_
from turbogears.database import session
from bkr.server.util import load_config
from bkr.server.model import System, SystemStatus, SystemActivity, \
SystemStatusDuration
from bkr.server.test.assertions import assert_durations_not_overlapping, \
assert_durations_contiguous
def get_status(value):
    """Resolve a recorded status value to a SystemStatus row.

    Historical activity records may store either a numeric status id or a
    status name; ``'Working'`` is a legacy alias for ``'Automated'``.
    """
    if value == u'Working':
        value = u'Automated'
    # Narrow the try block to the int() conversion only: previously a
    # ValueError raised inside by_id() itself would have been silently
    # retried as a (nonsensical) name lookup.
    try:
        status_id = int(value)
    except ValueError:
        return SystemStatus.by_name(value)
    return SystemStatus.by_id(status_id)
def populate_status_durations(system):
    """Reconstruct a system's SystemStatusDuration history from its
    recorded 'Status' change activity, appending one contiguous,
    non-overlapping duration row per status period.

    Precondition: the system has no status durations yet (fresh upgrade).
    """
    assert not system.status_durations
    # We don't know what the original status was, so let's set it to None for
    # now and see if we can figure it out next
    start_time = system.date_added
    status = None
    # Walk all recorded status changes in chronological order; both the
    # old 'status_id' and newer 'Status' field names are considered.
    for activity in SystemActivity.query().filter(and_(
            SystemActivity.object == system,
            SystemActivity.field_name.in_([u'Status', u'status_id']),
            SystemActivity.action == u'Changed'))\
            .order_by(SystemActivity.created):
        # Some old records have activity before date_added, probably because
        # the former is not in UTC
        changed_at = max(system.date_added, activity.created)
        # If this is the first status change, old_value might tell us what it
        # was before
        if status is None:
            if activity.old_value:
                status = get_status(activity.old_value)
            else:
                # As a fallback, assume systems always started out broken
                status = get_status(u'Broken')
        new_status = get_status(activity.new_value)
        # If the duration was non-zero, let's record it
        if changed_at > start_time and status != new_status:
            system.status_durations.append(SystemStatusDuration(
                status=status, start_time=start_time, finish_time=changed_at))
            status = new_status
            start_time = changed_at
    # No status changes recorded at all: assume it was broken throughout.
    if status is None:
        status = get_status(u'Broken')
    # Open-ended final period: finish_time=None means "current status".
    system.status_durations.append(SystemStatusDuration(
        status=status, start_time=start_time, finish_time=None))
    # Sanity checks: periods must tile the system's lifetime exactly.
    assert_durations_not_overlapping(system.status_durations)
    assert_durations_contiguous(system.status_durations)
    assert system.date_added == system.status_durations[0].start_time
if __name__ == '__main__':
    load_config()
    session.begin()
    # Iterate over plain ids (not ORM objects) so each system can be
    # loaded, migrated, flushed, and then expired from the session
    # individually, keeping the identity map small for large inventories.
    for system_id in [s.id for s in System.query()]:
        system = System.query().get(system_id)
        populate_status_durations(system)
        session.flush()
        session.clear()
    # Commit the whole upgrade as one transaction.
    session.commit()
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.