repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---|
inspyration/odoo | openerp/addons/base/ir/ir_needaction.py | copies: 455 | size: 2704 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class ir_needaction_mixin(osv.AbstractModel):
"""Mixin class for objects using the need action feature.
    The need-action feature can be used by models that must be able to
    signal that an action is required on a particular record. If the
    business logic requires an action to be performed by somebody, for
    instance validation by a manager, this mechanism makes it possible to
    define the list of users asked to perform that action.
Models using the 'need_action' feature should override the
``_needaction_domain_get`` method. This method returns a
domain to filter records requiring an action for a specific user.
This class also offers several global services:
- ``_needaction_count``: returns the number of actions uid has to perform
"""
_name = 'ir.needaction_mixin'
_needaction = True
#------------------------------------------------------
# Addons API
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
""" Returns the domain to filter records that require an action
        :return: the domain, or False if no action is required
"""
return False
#------------------------------------------------------
# "Need action" API
#------------------------------------------------------
def _needaction_count(self, cr, uid, domain=None, context=None):
""" Get the number of actions uid has to perform. """
dom = self._needaction_domain_get(cr, uid, context=context)
if not dom:
return 0
res = self.search(cr, uid, (domain or []) + dom, limit=100, order='id DESC', context=context)
return len(res)
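    # Usage sketch (hypothetical model, not part of this module): an addon
    # inherits the mixin and overrides ``_needaction_domain_get`` so that
    # draft records assigned to the current user count as needing action.
    #
    # class my_task(osv.Model):
    #     _name = 'my.task'
    #     _inherit = ['ir.needaction_mixin']
    #     def _needaction_domain_get(self, cr, uid, context=None):
    #         return [('user_id', '=', uid), ('state', '=', 'draft')]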
| license: agpl-3.0

ntt-sic/cinder | cinder/tests/test_wsgi.py | copies: 3 | size: 10311 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `cinder.wsgi`."""
import mock
import os.path
import ssl
import tempfile
import urllib2
from oslo.config import cfg
import testtools
import webob
import webob.dec
from cinder import exception
from cinder.openstack.common import gettextutils
from cinder import test
from cinder import utils
import cinder.wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'var'))
class TestLoaderNothingExists(test.TestCase):
"""Loader tests where os.path.exists always returns False."""
def setUp(self):
super(TestLoaderNothingExists, self).setUp()
self.stubs.Set(os.path, 'exists', lambda _: False)
def test_config_not_found(self):
self.assertRaises(
cinder.exception.ConfigNotFound,
cinder.wsgi.Loader,
)
class TestLoaderNormalFilesystem(test.TestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""
def setUp(self):
super(TestLoaderNormalFilesystem, self).setUp()
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
self.config.flush()
self.loader = cinder.wsgi.Loader(self.config.name)
self.addCleanup(self.config.close)
def test_config_found(self):
self.assertEqual(self.config.name, self.loader.config_path)
def test_app_not_found(self):
self.assertRaises(
cinder.exception.PasteAppNotFound,
self.loader.load_app,
"non-existent app",
)
def test_app_found(self):
url_parser = self.loader.load_app("test_app")
self.assertEqual("/tmp", url_parser.directory)
class TestWSGIServer(test.TestCase):
"""WSGI server tests."""
def _ipv6_configured():
try:
with file('/proc/net/if_inet6') as f:
return len(f.read()) > 0
except IOError:
return False
def test_no_app(self):
server = cinder.wsgi.Server("test_app", None)
self.assertEqual("test_app", server.name)
def test_start_random_port(self):
server = cinder.wsgi.Server("test_random_port", None, host="127.0.0.1")
self.assertEqual(0, server.port)
server.start()
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@testtools.skipIf(not _ipv6_configured(),
"Test requires an IPV6 configured interface")
def test_start_random_port_with_ipv6(self):
server = cinder.wsgi.Server("test_random_port",
None,
host="::1")
server.start()
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_app(self):
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = cinder.wsgi.Server("test_app", hello_world)
server.start()
response = urllib2.urlopen('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ssl(self):
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = cinder.wsgi.Server("test_app", hello_world)
server.start()
response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
@testtools.skipIf(not _ipv6_configured(),
"Test requires an IPV6 configured interface")
def test_app_using_ipv6_and_ssl(self):
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = cinder.wsgi.Server("test_app",
hello_world,
host="::1",
port=0)
server.start()
response = urllib2.urlopen('https://[::1]:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
class ExceptionTest(test.TestCase):
def _wsgi_app(self, inner_app):
# NOTE(luisg): In order to test localization, we need to
# make sure the lazy _() is installed in the 'fault' module
# also we don't want to install the _() system-wide and
# potentially break other test cases, so we do it here for this
# test suite only.
gettextutils.install('', lazy=True)
from cinder.api.middleware import fault
return fault.FaultWrapper(inner_app)
def _do_test_exception_safety_reflected_in_faults(self, expose):
class ExceptionWithSafety(exception.CinderException):
safe = expose
@webob.dec.wsgify
def fail(req):
raise ExceptionWithSafety('some explanation')
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
expected = ('ExceptionWithSafety: some explanation' if expose else
'The server has either erred or is incapable '
'of performing the requested operation.')
self.assertIn(expected, resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def test_safe_exceptions_are_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(True)
def test_unsafe_exceptions_are_not_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(False)
def _do_test_exception_mapping(self, exception_type, msg):
@webob.dec.wsgify
def fail(req):
raise exception_type(msg)
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn(msg, resp.body)
self.assertEqual(resp.status_int, exception_type.code, resp.body)
if hasattr(exception_type, 'headers'):
for (key, value) in exception_type.headers.iteritems():
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], value)
def test_quota_error_mapping(self):
self._do_test_exception_mapping(exception.QuotaError, 'too many used')
def test_non_cinder_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 404
self._do_test_exception_mapping(ExceptionWithCode,
'NotFound')
def test_non_cinder_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 417
self._do_test_exception_mapping(ExceptionWithCode,
'Expectation failed')
def test_exception_with_none_code_throws_500(self):
class ExceptionWithNoneCode(Exception):
code = None
msg = 'Internal Server Error'
@webob.dec.wsgify
def fail(req):
raise ExceptionWithNoneCode()
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(500, resp.status_int)
@mock.patch('cinder.openstack.common.gettextutils.get_localized_message')
def test_cinder_exception_with_localized_explanation(self, mock_t9n):
msg = 'My Not Found'
msg_translation = 'Mi No Encontrado'
message = gettextutils.Message(msg, '')
@webob.dec.wsgify
def fail(req):
class MyVolumeNotFound(exception.NotFound):
def __init__(self):
self.msg = message
self.safe = True
raise MyVolumeNotFound()
# Test response without localization
def mock_get_non_localized_message(msgid, locale):
return msg
mock_t9n.side_effect = mock_get_non_localized_message
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(404, resp.status_int)
self.assertIn(msg, resp.body)
# Test response with localization
def mock_get_localized_message(msgid, locale):
if isinstance(msgid, gettextutils.Message):
return msg_translation
return msgid
mock_t9n.side_effect = mock_get_localized_message
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(404, resp.status_int)
self.assertIn(msg_translation, resp.body)
| license: apache-2.0

adviti/melange | thirdparty/google_appengine/google/appengine/_internal/django/core/mail/message.py | copies: 23 | size: 11568 |
import mimetypes
import os
import random
import time
from email import Charset, Encoders
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.Header import Header
from email.Utils import formatdate, getaddresses, formataddr
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.core.mail.utils import DNS_NAME
from google.appengine._internal.django.utils.encoding import smart_str, force_unicode
from email.Utils import parseaddr
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, Charset.QP, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<[email protected]>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
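# Example of the generated shape (illustrative values only):
#   make_msgid()        -> '<20090210143025.5416.20922@mail.example.com>'
#   make_msgid('reply') -> '<20090210143025.5416.20922.reply@mail.example.com>'
# where the host part is the cached DNS_NAME rather than a per-call lookup.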
# Header names that contain structured address data (RFC 5322)
ADDRESS_HEADERS = set([
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
])
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_unicode(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val = val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding)
for addr in getaddresses((val,)))
else:
val = str(Header(val, encoding))
else:
if name.lower() == 'subject':
val = Header(val)
return name, val
def sanitize_address(addr, encoding):
if isinstance(addr, basestring):
addr = parseaddr(force_unicode(addr))
nm, addr = addr
nm = str(Header(nm, encoding))
try:
addr = addr.encode('ascii')
except UnicodeEncodeError: # IDN
if u'@' in addr:
localpart, domain = addr.split(u'@', 1)
localpart = str(Header(localpart, encoding))
domain = domain.encode('idna')
addr = '@'.join([localpart, domain])
else:
addr = str(Header(addr, encoding))
return formataddr((nm, addr))
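# Usage sketch for the two helpers above (illustrative, not executed here):
#   forbid_multi_line_headers('Subject', u'hi\nBcc: a@b', 'utf-8')
#       -> raises BadHeaderError, blocking header injection
#   sanitize_address(u'J\xf6rg <jorg@b\xfccher.example>', 'utf-8')
#       -> display name becomes RFC 2047 encoded, domain is IDNA-encoded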
class SafeMIMEText(MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if bcc:
assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from google.appengine._internal.django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(smart_str(self.body, encoding),
self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = ', '.join(self.to)
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() == 'from': # From is already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Bcc entries).
"""
return self.to + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content == mimetype == None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(smart_str(content, encoding), subtype, encoding)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
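# Usage sketch (hypothetical addresses; a configured email backend is needed
# before send() does anything):
#   msg = EmailMultiAlternatives('Subject', 'plain text body',
#                                'from@example.com', ['to@example.com'])
#   msg.attach_alternative('<p>HTML body</p>', 'text/html')
#   msg.attach('notes.txt', 'file contents', 'text/plain')
#   msg.send()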
| license: apache-2.0

MyKings/xunfeng | vulscan/vuldb/crack_jenkins.py | copies: 2 | size: 3299 |
# coding:utf-8
# author:wolf
import urllib2
import re
import json
def get_plugin_info():
plugin_info = {
"name": "Jenkins控制台弱口令",
"info": "攻击者通过此漏洞可以访问查看项目代码信息,通过script功能可执行命令直接获取服务器权限。",
"level": "高危",
"type": "弱口令",
"author": "wolf@YSRC",
"url": "https://www.secpulse.com/archives/2166.html",
"keyword": "tag:jenkins",
"source": 1
}
return plugin_info
def get_user_list(url,timeout):
user_list = []
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
try:
req = opener.open(url + "/asynchPeople/", timeout=timeout)
res_html = req.read()
except:
return user_list
m = re.search("makeStaplerProxy\('(.*?)','(.*?)'",res_html)
if m:
user_url = url + m.group(1)
crumb = m.group(2)
request = urllib2.Request(user_url+"/start","[]")
set_request(request,crumb)
try:
opener.open(request, timeout=timeout)
except:
pass
while True:
request = urllib2.Request(user_url+"/news","[]")
set_request(request,crumb)
user_data = opener.open(request, timeout=timeout).read()
            if len(user_data) >= 20:
user_array = json.loads(user_data)
for _ in user_array["data"]:
user_list.append(_["id"].encode("utf-8"))
if user_array["status"] == "done":break
else:break
return user_list
def set_request(request,crumb):
request.add_header('Content-Type', 'application/x-stapler-method-invocation;charset=UTF-8')
request.add_header('X-Requested-With', 'XMLHttpRequest')
request.add_header('Crumb', crumb)
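# NOTE: PASSWORD_DIC is not defined or imported in this file; the xunfeng
# framework is assumed to inject the shared password dictionary into each
# plugin's globals before crack() runs.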
def crack(url,user_list,timeout):
error_i = 0
for user in user_list:
for password in PASSWORD_DIC:
try:
login_url = url + '/j_acegi_security_check'
PostStr = 'j_username=%s&j_password=%s' % (user, password)
request = urllib2.Request(login_url, PostStr)
res = urllib2.urlopen(request, timeout=timeout)
if res.code == 200 and "X-Jenkins" in res.headers:
                    info = u'Weak password found - username: %s, password: %s' % (user, password)
return info
except urllib2.HTTPError, e:
continue
except urllib2.URLError, e:
error_i += 1
if error_i >= 3:
return
def check(host, port, timeout):
url = "http://%s:%d" % (host, int(port))
try:
res_html = urllib2.urlopen(url,timeout=timeout).read()
except urllib2.HTTPError, e:
res_html = e.read()
if "/asynchPeople/" in res_html:
if '"/manage" class="task-link' in res_html:
return u"未授权访问且为管理员权限"
user_list = get_user_list(url,timeout)
result = crack(url,user_list,timeout)
if result:
return result
else:
return u"未授权访问"
elif "anonymous" in res_html:
user_list = ["admin","test"]
info = crack(url,user_list,timeout)
return info
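# Usage sketch (hypothetical host/port; the scanner framework normally calls
# this entry point):
#   result = check("192.0.2.10", 8080, 10)
#   # -> e.g. u"Weak password found - username: admin, password: admin" or None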
| license: gpl-3.0

TaliesinSkye/evennia | src/typeclasses/models.py | copies: 1 | size: 52630 |
"""
This is the *abstract* django models for many of the database objects
in Evennia. A django abstract (obs, not the same as a Python metaclass!) is
a model which is not actually created in the database, but which only exists
for other models to inherit from, to avoid code duplication. Any model can
import and inherit from these classes.
Attributes are database objects stored on other objects. The implementing
class needs to supply a ForeignKey field attr_object pointing to the kind
of object being mapped. Attributes storing iterables actually store special
types of iterables named PackedList/PackedDict respectively. These make
sure changes to them are saved to the database - this is critical in order
to allow for obj.db.mylist[2] = data. Also, all dbobjects are saved as
dbrefs but are also aggressively cached.
TypedObjects are objects 'decorated' with a typeclass - that is, the typeclass
(a normal Python class implementing some special tricks with its
get/set attribute methods) allows for the creation of all sorts of different
objects, all with the same database object underneath. Usually attributes are
used to permanently store things not hard-coded as fields on the database object.
The admin should usually not have to deal directly with the database object
layer.
This module also contains the Managers for the respective models; inherit from
these to create custom managers.
"""
import sys
#try:
# import cPickle as pickle
#except ImportError:
# import pickle
import traceback
#from collections import defaultdict
from django.db import models, IntegrityError
from django.conf import settings
from django.utils.encoding import smart_str
from django.contrib.contenttypes.models import ContentType
from src.utils.idmapper.models import SharedMemoryModel
from src.server.caches import get_field_cache, set_field_cache, del_field_cache
from src.server.caches import get_attr_cache, set_attr_cache, del_attr_cache
from src.server.caches import get_prop_cache, set_prop_cache, del_prop_cache, flush_attr_cache
from src.server.caches import call_ndb_hooks
from src.server.models import ServerConfig
from src.typeclasses import managers
from src.locks.lockhandler import LockHandler
from src.utils import logger, utils
from src.utils.utils import make_iter, is_iter, to_unicode, to_str
from src.utils.dbserialize import to_pickle, from_pickle
from src.utils.picklefield import PickledObjectField
__all__ = ("Attribute", "TypeNick", "TypedObject")
_PERMISSION_HIERARCHY = [p.lower() for p in settings.PERMISSION_HIERARCHY]
_CTYPEGET = ContentType.objects.get
_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
#_PLOADS = pickle.loads
#_PDUMPS = pickle.dumps
#------------------------------------------------------------
#
# Attributes
#
#------------------------------------------------------------
class Attribute(SharedMemoryModel):
"""
Abstract django model.
Attributes are things that are specific to different types of objects. For
example, a drink container needs to store its fill level, whereas an exit
needs to store its open/closed/locked/unlocked state. These are done via
attributes, rather than making different classes for each object type and
storing them directly. The added benefit is that we can add/remove
attributes on the fly as we like.
The Attribute class defines the following properties:
key - primary identifier
mode - which type of data is stored in attribute
lock_storage - perm strings
obj - which object the attribute is defined on
date_created - when the attribute was created
value - the data stored in the attribute
what is actually stored in the field is a dict
{type : nodb|dbobj|dbiter,
data : <data>}
where type is info for the loader, telling it if holds a single
dbobject (dbobj), have to do a full scan for dbrefs (dbiter) or
if it is a normal Python structure without any dbobjs inside it
and can thus return it without further action (nodb).
"""
#
# Attribute Database Model setup
#
#
    # These database fields are all set using their corresponding properties,
    # named the same as the field but without the db_* prefix.
db_key = models.CharField('key', max_length=255, db_index=True)
# access through the value property
db_value = PickledObjectField('value2', null=True)
# Lock storage
db_lock_storage = models.TextField('locks', blank=True)
# references the object the attribute is linked to (this is set
# by each child class to this abstact class)
    db_obj = None # models.ForeignKey("ReferencedObject")
# time stamp
db_date_created = models.DateTimeField('date_created', editable=False, auto_now_add=True)
# Database manager
objects = managers.AttributeManager()
# Lock handler self.locks
def __init__(self, *args, **kwargs):
"Initializes the parent first -important!"
SharedMemoryModel.__init__(self, *args, **kwargs)
self.locks = LockHandler(self)
self.no_cache = True
self.cached_value = None
class Meta:
"Define Django meta options"
abstract = True
verbose_name = "Evennia Attribute"
# Wrapper properties to easily set database fields. These are
# @property decorators that allows to access these fields using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# key property (wraps db_key)
#@property
def __key_get(self):
"Getter. Allows for value = self.key"
return get_field_cache(self, "key")
#@key.setter
def __key_set(self, value):
"Setter. Allows for self.key = value"
set_field_cache(self, "key", value)
#@key.deleter
def __key_del(self):
"Deleter. Allows for del self.key"
raise Exception("Cannot delete attribute key!")
key = property(__key_get, __key_set, __key_del)
# obj property (wraps db_obj)
#@property
def __obj_get(self):
"Getter. Allows for value = self.obj"
return get_field_cache(self, "obj")
#@obj.setter
def __obj_set(self, value):
"Setter. Allows for self.obj = value"
set_field_cache(self, "obj", value)
#@obj.deleter
def __obj_del(self):
"Deleter. Allows for del self.obj"
self.db_obj = None
self.save()
del_field_cache(self, "obj")
obj = property(__obj_get, __obj_set, __obj_del)
# date_created property (wraps db_date_created)
#@property
def __date_created_get(self):
"Getter. Allows for value = self.date_created"
return get_field_cache(self, "date_created")
#@date_created.setter
def __date_created_set(self, value):
"Setter. Allows for self.date_created = value"
raise Exception("Cannot edit date_created!")
#@date_created.deleter
def __date_created_del(self):
"Deleter. Allows for del self.date_created"
raise Exception("Cannot delete date_created!")
date_created = property(__date_created_get, __date_created_set, __date_created_del)
# value property (wraps db_value)
#@property
def __value_get(self):
"""
Getter. Allows for value = self.value. Reads from cache if possible.
"""
if self.no_cache:
# re-create data from database and cache it
value = from_pickle(self.db_value, db_obj=self)
self.cached_value = value
self.no_cache = False
return self.cached_value
#@value.setter
def __value_set(self, new_value):
"""
Setter. Allows for self.value = value. We make sure to cache everything.
"""
to_store = to_pickle(new_value)
self.cached_value = from_pickle(to_store, db_obj=self)
self.no_cache = False
self.db_value = to_store
self.save()
self.at_set(self.cached_value)
#@value.deleter
def __value_del(self):
"Deleter. Allows for del attr.value. This removes the entire attribute."
self.delete()
value = property(__value_get, __value_set, __value_del)
# lock_storage property (wraps db_lock_storage)
#@property
def __lock_storage_get(self):
"Getter. Allows for value = self.lock_storage"
return get_field_cache(self, "lock_storage")
#@lock_storage.setter
def __lock_storage_set(self, value):
"""Saves the lock_storage. This is usually not called directly, but through self.lock()"""
self.db_lock_storage = value
self.save()
#@lock_storage.deleter
def __lock_storage_del(self):
"Deleter is disabled. Use the lockhandler.delete (self.lock.delete) instead"""
logger.log_errmsg("Lock_Storage (on %s) cannot be deleted. Use obj.lock.delete() instead." % self)
lock_storage = property(__lock_storage_get, __lock_storage_set, __lock_storage_del)
#
#
# Attribute methods
#
#
def __str__(self):
return smart_str("%s(%s)" % (self.key, self.id))
def __unicode__(self):
return u"%s(%s)" % (self.key, self.id)
def access(self, accessing_obj, access_type='read', default=False):
"""
Determines if another object has permission to access.
accessing_obj - object trying to access this one
access_type - type of access sought
default - what to return if no lock of access_type was found
"""
return self.locks.check(accessing_obj, access_type=access_type, default=default)
def at_set(self, new_value):
"""
Hook method called when the attribute changes value.
"""
pass
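# A concrete child of this abstract model binds db_obj to a real table, e.g.
# (hypothetical names, following the ForeignKey pattern described in the
# module docstring):
#
# class ObjAttribute(Attribute):
#     db_obj = models.ForeignKey("ObjectDB")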
#------------------------------------------------------------
#
# Nicks
#
#------------------------------------------------------------
class TypeNick(SharedMemoryModel):
"""
This model holds whichever alternate names this object
has for OTHER objects, but also for arbitrary strings,
channels, players etc. Setting a nick does not affect
the nicknamed object at all (as opposed to Aliases above),
and only this object will be able to refer to the nicknamed
object by the given nick.
The default nick types used by Evennia are:
inputline (default) - match against all input
player - match against player searches
obj - match against object searches
channel - used to store own names for channels
"""
db_nick = models.CharField('nickname',max_length=255, db_index=True, help_text='the alias')
db_real = models.TextField('realname', help_text='the original string to match and replace.')
    db_type = models.CharField('nick type', default="inputline", max_length=16, null=True, blank=True,
           help_text="the nick type describes when the engine tries to do nick-replacement. Common options are 'inputline', 'player', 'obj' and 'channel'. Inputline matches against all input, whereas the other types try to replace in various searches or when posting to channels.")
db_obj = None #models.ForeignKey("ObjectDB")
class Meta:
"Define Django meta options"
abstract = True
verbose_name = "Nickname"
unique_together = ("db_nick", "db_type", "db_obj")
class TypeNickHandler(object):
"""
Handles nick access and setting. Accessed through ObjectDB.nicks
"""
NickClass = TypeNick
def __init__(self, obj):
"""
This handler allows for accessing and setting nicks -
on-the-fly replacements for various text input passing through
this object (most often a Character)
The default nick types used by Evennia are:
inputline (default) - match against all input
player - match against player searches
obj - match against object searches
channel - used to store own names for channels
You can define other nicktypes by using the add() method of
this handler and set nick_type to whatever you want. It's then
up to you to somehow make use of this nick_type in your game
(such as for a "recog" system).
"""
self.obj = obj
def add(self, nick, realname, nick_type="inputline"):
"""
Assign a new nick for realname.
nick_types used by Evennia are
'inputline', 'player', 'obj' and 'channel'
"""
if not nick or not nick.strip():
return
nick = nick.strip()
real = realname.strip()
query = self.NickClass.objects.filter(db_obj=self.obj, db_nick__iexact=nick, db_type__iexact=nick_type)
if query.count():
old_nick = query[0]
old_nick.db_real = real
old_nick.save()
else:
new_nick = self.NickClass(db_nick=nick, db_real=real, db_type=nick_type, db_obj=self.obj)
new_nick.save()
def delete(self, nick, nick_type="inputline"):
"Removes a previously stored nick"
nick = nick.strip()
query = self.NickClass.objects.filter(db_obj=self.obj, db_nick__iexact=nick, db_type__iexact=nick_type)
if query.count():
# remove the found nick(s)
query.delete()
def get(self, nick=None, nick_type="inputline", obj=None):
"""
Retrieves a given nick (with a specified nick_type) on an object. If no nick is given, returns a list
of all nicks on the object, or the empty list.
Defaults to searching the current object.
"""
if not obj:
# defaults to the current object
obj = self.obj
if nick:
query = self.NickClass.objects.filter(db_obj=obj, db_nick__iexact=nick, db_type__iexact=nick_type)
query = query.values_list("db_real", flat=True)
if query.count():
return query[0]
else:
return nick
else:
return self.NickClass.objects.filter(db_obj=obj)
def has(self, nick, nick_type="inputline", obj=None):
"""
Returns true/false if this nick and nick_type is defined on the given
object or not. If no obj is given, default to the current object the
handler is defined on.
"""
if not obj:
obj = self.obj
return self.NickClass.objects.filter(db_obj=obj, db_nick__iexact=nick, db_type__iexact=nick_type).count()
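# Usage sketch, assuming an object exposing this handler as `obj.nicks`
# (names are illustrative):
#   obj.nicks.add("tom", "Thomas Johnsson")  # 'tom' now expands on the input line
#   obj.nicks.get("tom")                     # -> "Thomas Johnsson"
#   obj.nicks.has("tom")                     # -> truthy
#   obj.nicks.delete("tom")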
#------------------------------------------------------------
#
# Typed Objects
#
#------------------------------------------------------------
class TypedObject(SharedMemoryModel):
"""
Abstract Django model.
This is the basis for a typed object. It also contains all the
mechanics for managing connected attributes.
The TypedObject has the following properties:
key - main name
name - alias for key
typeclass_path - the path to the decorating typeclass
typeclass - auto-linked typeclass
date_created - time stamp of object creation
permissions - perm strings
dbref - #id of object
db - persistent attribute storage
ndb - non-persistent attribute storage
"""
#
# TypedObject Database Model setup
#
#
    # These database fields are all set using their corresponding properties,
    # named the same as the field but without the db_* prefix.
# Main identifier of the object, for searching. Can also
# be referenced as 'name'.
db_key = models.CharField('key', max_length=255, db_index=True)
# This is the python path to the type class this object is tied to
# (the type class is what defines what kind of Object this is)
db_typeclass_path = models.CharField('typeclass', max_length=255, null=True, help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.")
# Creation date
db_date_created = models.DateTimeField('creation date', editable=False, auto_now_add=True)
# Permissions (access these through the 'permissions' property)
db_permissions = models.CharField('permissions', max_length=255, blank=True, help_text="a comma-separated list of text strings checked by certain locks. They are often used for hierarchies, such as letting a Player have permission 'Wizards', 'Builders' etc. Character objects use 'Players' by default. Most other objects don't have any permissions.")
# Lock storage
db_lock_storage = models.TextField('locks', blank=True, help_text="locks limit access to an entity. A lock is defined as a 'lock string' on the form 'type:lockfunctions', defining what functionality is locked and how to determine access. Not defining a lock means no access is granted.")
# Database manager
objects = managers.TypedObjectManager()
# object cache and flags
_cached_typeclass = None
# lock handler self.locks
def __init__(self, *args, **kwargs):
"We must initialize the parent first - important!"
SharedMemoryModel.__init__(self, *args, **kwargs)
self.locks = LockHandler(self)
class Meta:
"""
Django setup info.
"""
abstract = True
verbose_name = "Evennia Database Object"
ordering = ['-db_date_created', 'id', 'db_typeclass_path', 'db_key']
# Wrapper properties to easily set database fields. These are
# @property decorators that allows to access these fields using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# key property (wraps db_key)
#@property
def __key_get(self):
"Getter. Allows for value = self.key"
return get_field_cache(self, "key")
#@key.setter
def __key_set(self, value):
"Setter. Allows for self.key = value"
set_field_cache(self, "key", value)
#@key.deleter
def __key_del(self):
"Deleter. Allows for del self.key"
raise Exception("Cannot delete objectdb key!")
key = property(__key_get, __key_set, __key_del)
# name property (wraps db_key too - alias to self.key)
#@property
def __name_get(self):
"Getter. Allows for value = self.name"
return get_field_cache(self, "key")
#@name.setter
def __name_set(self, value):
"Setter. Allows for self.name = value"
set_field_cache(self, "key", value)
#@name.deleter
def __name_del(self):
"Deleter. Allows for del self.name"
raise Exception("Cannot delete name!")
name = property(__name_get, __name_set, __name_del)
# typeclass_path property
#@property
def __typeclass_path_get(self):
"Getter. Allows for value = self.typeclass_path"
return get_field_cache(self, "typeclass_path")
#@typeclass_path.setter
def __typeclass_path_set(self, value):
"Setter. Allows for self.typeclass_path = value"
set_field_cache(self, "typeclass_path", value)
_SA(self, "_cached_typeclass", None)
#@typeclass_path.deleter
def __typeclass_path_del(self):
"Deleter. Allows for del self.typeclass_path"
self.db_typeclass_path = ""
self.save()
del_field_cache(self, "typeclass_path")
_SA(self, "_cached_typeclass", None)
typeclass_path = property(__typeclass_path_get, __typeclass_path_set, __typeclass_path_del)
# date_created property
#@property
def __date_created_get(self):
"Getter. Allows for value = self.date_created"
return get_field_cache(self, "date_created")
#@date_created.setter
def __date_created_set(self, value):
"Setter. Allows for self.date_created = value"
raise Exception("Cannot change date_created!")
#@date_created.deleter
def __date_created_del(self):
"Deleter. Allows for del self.date_created"
raise Exception("Cannot delete date_created!")
date_created = property(__date_created_get, __date_created_set, __date_created_del)
# permissions property
#@property
def __permissions_get(self):
"Getter. Allows for value = self.name. Returns a list of permissions."
perms = get_field_cache(self, "permissions")
if perms:
return [perm.strip() for perm in perms.split(',')]
return []
#@permissions.setter
def __permissions_set(self, value):
"Setter. Allows for self.name = value. Stores as a comma-separated string."
value = ",".join([utils.to_unicode(val).strip() for val in make_iter(value)])
set_field_cache(self, "permissions", value)
#@permissions.deleter
def __permissions_del(self):
"Deleter. Allows for del self.name"
self.db_permissions = ""
self.save()
del_field_cache(self, "permissions")
permissions = property(__permissions_get, __permissions_set, __permissions_del)
# lock_storage property (wraps db_lock_storage)
#@property
def __lock_storage_get(self):
"Getter. Allows for value = self.lock_storage"
return get_field_cache(self, "lock_storage")
#@lock_storage.setter
def __lock_storage_set(self, value):
"""Saves the lock_storagetodate. This is usually not called directly, but through self.lock()"""
set_field_cache(self, "lock_storage", value)
#@lock_storage.deleter
def __lock_storage_del(self):
"Deleter is disabled. Use the lockhandler.delete (self.lock.delete) instead"""
logger.log_errmsg("Lock_Storage (on %s) cannot be deleted. Use obj.lock.delete() instead." % self)
lock_storage = property(__lock_storage_get, __lock_storage_set, __lock_storage_del)
#
#
# TypedObject main class methods and properties
#
#
# these are identifiers for fast Attribute access and caching
_typeclass_paths = settings.OBJECT_TYPECLASS_PATHS
_attribute_class = Attribute # replaced by relevant attribute class for child
_db_model_name = "typeclass" # used by attributes to safely store objects
def __eq__(self, other):
return other and hasattr(other, 'dbid') and self.dbid == other.dbid
def __str__(self):
return smart_str("%s" % self.key)
def __unicode__(self):
return u"%s" % self.key
def __getattribute__(self, propname):
"""
Will predominantly look for an attribute
on this object, but if not found we will
check if it might exist on the typeclass instead. Since
the typeclass refers back to the databaseobject as well, we
have to be very careful to avoid loops.
"""
try:
return _GA(self, propname)
except AttributeError:
# check if the attribute exists on the typeclass instead
# (we make sure to not incur a loop by not triggering the
# typeclass' __getattribute__, since that one would
# try to look back to this very database object.)
return _GA(_GA(self, 'typeclass'), propname)
def _hasattr(self, obj, attrname):
"""
Loop-safe version of hasattr, to avoid running a lookup that
will be rerouted up the typeclass. Returns True/False.
"""
try:
_GA(obj, attrname)
return True
except AttributeError:
return False
#@property
def __dbid_get(self):
"""
Caches and returns the unique id of the object.
Use this instead of self.id, which is not cached.
"""
dbid = get_prop_cache(self, "_dbid")
if not dbid:
dbid = _GA(self, "id")
set_prop_cache(self, "_dbid", dbid)
return dbid
def __dbid_set(self, value):
raise Exception("dbid cannot be set!")
def __dbid_del(self):
raise Exception("dbid cannot be deleted!")
dbid = property(__dbid_get, __dbid_set, __dbid_del)
#@property
def __dbref_get(self):
"""
Returns the object's dbref on the form #NN.
"""
return "#%s" % _GA(self, "_TypedObject__dbid_get")()
def __dbref_set(self):
raise Exception("dbref cannot be set!")
def __dbref_del(self):
raise Exception("dbref cannot be deleted!")
dbref = property(__dbref_get, __dbref_set, __dbref_del)
# typeclass property
#@property
def __typeclass_get(self):
"""
Getter. Allows for value = self.typeclass.
The typeclass is a class object found at self.typeclass_path;
it allows for extending the Typed object for all different
types of objects that the game needs. This property
handles loading and initialization of the typeclass on the fly.
Note: The liberal use of _GA and __setattr__ (instead
of normal dot notation) is due to optimization: it avoids calling
the custom self.__getattribute__ more than necessary.
"""
path = _GA(self, "typeclass_path")
typeclass = _GA(self, "_cached_typeclass")
try:
if typeclass and _GA(typeclass, "path") == path:
# don't call at_init() when returning from cache
return typeclass
except AttributeError:
pass
errstring = ""
if not path:
# this means we should get the default obj without giving errors.
return _GA(self, "_get_default_typeclass")(cache=True, silent=True, save=True)
else:
# handle loading/importing of typeclasses, searching all paths.
# (self._typeclass_paths is a shortcut to settings.TYPECLASS_*_PATHS
# where '*' is either OBJECT, SCRIPT or PLAYER depending on the typed
# entities).
typeclass_paths = [path] + ["%s.%s" % (prefix, path) for prefix in _GA(self, '_typeclass_paths')]
for tpath in typeclass_paths:
# try to import and analyze the result
typeclass = _GA(self, "_path_import")(tpath)
#print "typeclass:",typeclass,tpath
if callable(typeclass):
# we succeeded to import. Cache and return.
_SA(self, "typeclass_path", tpath)
typeclass = typeclass(self)
_SA(self, "_cached_typeclass", typeclass)
try:
typeclass.at_init()
except AttributeError:
logger.log_trace("\n%s: Error initializing typeclass %s. Using default." % (self, tpath))
break
except Exception:
logger.log_trace()
return typeclass
elif hasattr(typeclass, '__file__'):
errstring += "\n%s seems to be just the path to a module. You need" % tpath
errstring += " to specify the actual typeclass name inside the module too."
else:
errstring += "\n%s" % typeclass # this will hold a growing error message.
# If we reach this point we couldn't import any typeclasses. Return default. It's up to the calling
# method to use e.g. self.is_typeclass() to detect that the result is not the one asked for.
_GA(self, "_display_errmsg")(errstring)
_SA(self, "typeclass_lasterrmsg", errstring)
return _GA(self, "_get_default_typeclass")(cache=False, silent=False, save=False)
#@typeclass.deleter
def __typeclass_del(self):
"Deleter. Disallow 'del self.typeclass'"
raise Exception("The typeclass property should never be deleted, only changed in-place!")
# typeclass property
typeclass = property(__typeclass_get, fdel=__typeclass_del)
    # The last error string is stored here so that accessing methods can
    # inspect it. It is set by _display_errmsg, which will also print to the
    # log if the error happens during server startup.
typeclass_last_errmsg = ""
def _path_import(self, path):
"""
Import a class from a python path of the
form src.objects.object.Object
"""
errstring = ""
if not path:
# this needs not be bad, it just means
# we should use defaults.
return None
try:
modpath, class_name = path.rsplit('.', 1)
module = __import__(modpath, fromlist=["none"])
return module.__dict__[class_name]
except ImportError:
trc = sys.exc_traceback
if not trc.tb_next:
# we separate between not finding the module, and finding a buggy one.
errstring = "Typeclass not found trying path '%s'." % path
else:
# a bug in the module is reported normally.
trc = traceback.format_exc()
errstring = "\n%sError importing '%s'." % (trc, path)
except (ValueError, TypeError):
errstring = "Malformed typeclass path '%s'." % path
except KeyError:
errstring = "No class '%s' was found in module '%s'."
errstring = errstring % (class_name, modpath)
except Exception:
trc = traceback.format_exc()
errstring = "\n%sException importing '%s'." % (trc, path)
# return the error.
return errstring
def _display_errmsg(self, message):
"""
Helper function to display error.
"""
if ServerConfig.objects.conf("server_starting_mode"):
print message.strip()
else:
_SA(self, "typeclass_last_errmsg", message.strip())
return
def _get_default_typeclass(self, cache=False, silent=False, save=False):
"""
This is called when a typeclass fails to
load for whatever reason.
Overload this in different entities.
Default operation is to load a default typeclass.
"""
defpath = _GA(self, "_default_typeclass_path")
typeclass = _GA(self, "_path_import")(defpath)
# if not silent:
# #errstring = "\n\nUsing Default class '%s'." % defpath
# _GA(self, "_display_errmsg")(errstring)
if not callable(typeclass):
# if typeclass still doesn't exist at this point, we're in trouble.
# fall back to hardcoded core class which is wrong for e.g. scripts/players etc.
failpath = defpath
defpath = "src.objects.objects.Object"
typeclass = _GA(self, "_path_import")(defpath)
if not silent:
#errstring = " %s\n%s" % (typeclass, errstring)
errstring = " Default class '%s' failed to load." % failpath
errstring += "\n Using Evennia's default class '%s'." % defpath
_GA(self, "_display_errmsg")(errstring)
if not callable(typeclass):
# if this is still giving an error, Evennia is wrongly configured or buggy
raise Exception("CRITICAL ERROR: The final fallback typeclass %s cannot load!!" % defpath)
typeclass = typeclass(self)
if save:
_SA(self, 'db_typeclass_path', defpath)
_GA(self, 'save')()
if cache:
_SA(self, "_cached_db_typeclass_path", defpath)
_SA(self, "_cached_typeclass", typeclass)
try:
typeclass.at_init()
except Exception:
logger.log_trace()
return typeclass
def is_typeclass(self, typeclass, exact=False):
"""
Returns true if this object has this type
        OR has a typeclass which is a subclass of
the given typeclass. This operates on the actually
loaded typeclass (this is important since a failing
typeclass may instead have its default currently loaded)
typeclass - can be a class object or the
python path to such an object to match against.
exact - returns true only if the object's
type is exactly this typeclass, ignoring
parents.
"""
try:
typeclass = _GA(typeclass, "path")
except AttributeError:
pass
typeclasses = [typeclass] + ["%s.%s" % (path, typeclass) for path in _GA(self, "_typeclass_paths")]
if exact:
            current_path = _GA(self.typeclass, "path")  # i.e. _GA(self, "_cached_db_typeclass_path")
return typeclass and any((current_path == typec for typec in typeclasses))
else:
# check parent chain
return any((cls for cls in self.typeclass.__class__.mro()
if any(("%s.%s" % (_GA(cls,"__module__"), _GA(cls,"__name__")) == typec for typec in typeclasses))))
def delete(self, *args, **kwargs):
"""
Type-level cleanup
"""
flush_attr_cache()
super(TypedObject, self).delete(*args, **kwargs)
#
# Object manipulation methods
#
#
def swap_typeclass(self, new_typeclass, clean_attributes=False, no_default=True):
"""
This performs an in-situ swap of the typeclass. This means
that in-game, this object will suddenly be something else.
Player will not be affected. To 'move' a player to a different
object entirely (while retaining this object's type), use
self.player.swap_object().
Note that this might be an error prone operation if the
old/new typeclass was heavily customized - your code
might expect one and not the other, so be careful to
bug test your code if using this feature! Often its easiest
to create a new object and just swap the player over to
that one instead.
Arguments:
new_typeclass (path/classobj) - type to switch to
clean_attributes (bool/list) - will delete all attributes
stored on this object (but not any
of the database fields such as name or
location). You can't get attributes back,
but this is often the safest bet to make
sure nothing in the new typeclass clashes
with the old one. If you supply a list,
only those named attributes will be cleared.
no_default - if this is active, the swapper will not allow for
swapping to a default typeclass in case the given
one fails for some reason. Instead the old one
will be preserved.
Returns:
boolean True/False depending on if the swap worked or not.
"""
if callable(new_typeclass):
# this is an actual class object - build the path
cls = new_typeclass
new_typeclass = "%s.%s" % (cls.__module__, cls.__name__)
else:
new_typeclass = "%s" % to_str(new_typeclass)
# Try to set the new path
# this will automatically save to database
old_typeclass_path = self.typeclass_path
_SA(self, "typeclass_path", new_typeclass.strip())
# this will automatically use a default class if
# there is an error with the given typeclass.
new_typeclass = self.typeclass
if self.typeclass_path != new_typeclass.path and no_default:
# something went wrong; the default was loaded instead,
# and we don't allow that; instead we return to previous.
_SA(self, "typeclass_path", old_typeclass_path)
return False
if clean_attributes:
# Clean out old attributes
if is_iter(clean_attributes):
for attr in clean_attributes:
self.attr(attr, delete=True)
for nattr in clean_attributes:
if hasattr(self.ndb, nattr):
self.nattr(nattr, delete=True)
else:
#print "deleting attrs ..."
for attr in self.get_all_attributes():
attr.delete()
for nattr in self.ndb.all:
del nattr
# run hooks for this new typeclass
new_typeclass.basetype_setup()
new_typeclass.at_object_creation()
return True
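    # Usage sketch (hypothetical typeclass path; it must point at a valid
    # typeclass for the swap to succeed):
    #   ok = obj.swap_typeclass("game.gamesrc.objects.button.Button",
    #                           clean_attributes=True)
    #   if not ok:
    #       pass  # old typeclass kept; the new one failed to load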
#
# Attribute handler methods
#
#
# Fully persistent attributes. You usually access these
# through the obj.db.attrname method.
# Helper methods for persistent attributes
def has_attribute(self, attribute_name):
"""
See if we have an attribute set on the object.
attribute_name: (str) The attribute's name.
"""
if not get_attr_cache(self, attribute_name):
attrib_obj = _GA(self, "_attribute_class").objects.filter(
db_obj=self, db_key__iexact=attribute_name)
if attrib_obj:
set_attr_cache(self, attribute_name, attrib_obj[0])
else:
return False
return True
def set_attribute(self, attribute_name, new_value=None, lockstring=""):
"""
Sets an attribute on an object. Creates the attribute if need
be.
attribute_name: (str) The attribute's name.
new_value: (python obj) The value to set the attribute to. If this is not
a str, the object will be stored as a pickle.
lockstring - this sets an access restriction on the attribute object. Note that
this is normally NOT checked - use the secureattr() access method
below to perform access-checked modification of attributes. Lock
types checked by secureattr are 'attrread','attredit','attrcreate'.
"""
attrib_obj = get_attr_cache(self, attribute_name)
if not attrib_obj:
attrclass = _GA(self, "_attribute_class")
# check if attribute already exists.
attrib_obj = attrclass.objects.filter(
db_obj=self, db_key__iexact=attribute_name)
if attrib_obj:
# use old attribute
attrib_obj = attrib_obj[0]
else:
# no match; create new attribute
attrib_obj = attrclass(db_key=attribute_name, db_obj=self)
if lockstring:
attrib_obj.locks.add(lockstring)
# re-set an old attribute value
try:
attrib_obj.value = new_value
except IntegrityError:
# this can happen if the cache was stale and the database object is
# missing. If so we need to clean self.hashid from the cache
flush_attr_cache(self)
self.delete()
raise IntegrityError("Attribute could not be saved - object %s was deleted from database." % self.key)
set_attr_cache(self, attribute_name, attrib_obj)
def get_attribute_obj(self, attribute_name, default=None):
"""
Get the actual attribute object named attribute_name
"""
attrib_obj = get_attr_cache(self, attribute_name)
if not attrib_obj:
attrib_obj = _GA(self, "_attribute_class").objects.filter(
db_obj=self, db_key__iexact=attribute_name)
if not attrib_obj:
return default
set_attr_cache(self, attribute_name, attrib_obj[0]) #query is first evaluated here
return attrib_obj[0]
return attrib_obj
def get_attribute(self, attribute_name, default=None):
"""
Returns the value of an attribute on an object. You may need to
type cast the returned value from this function since the attribute
can be of any type. Returns default if no match is found.
attribute_name: (str) The attribute's name.
default: What to return if no attribute is found
"""
attrib_obj = get_attr_cache(self, attribute_name)
if not attrib_obj:
attrib_obj = _GA(self, "_attribute_class").objects.filter(
db_obj=self, db_key__iexact=attribute_name)
if not attrib_obj:
return default
set_attr_cache(self, attribute_name, attrib_obj[0]) #query is first evaluated here
return attrib_obj[0].value
return attrib_obj.value
def get_attribute_raise(self, attribute_name):
"""
Returns value of an attribute. Raises AttributeError
if no match is found.
attribute_name: (str) The attribute's name.
"""
attrib_obj = get_attr_cache(self, attribute_name)
if not attrib_obj:
attrib_obj = _GA(self, "_attribute_class").objects.filter(
db_obj=self, db_key__iexact=attribute_name)
if not attrib_obj:
raise AttributeError
set_attr_cache(self, attribute_name, attrib_obj[0]) #query is first evaluated here
return attrib_obj[0].value
return attrib_obj.value
def del_attribute(self, attribute_name):
"""
Removes an attribute entirely.
attribute_name: (str) The attribute's name.
"""
attr_obj = get_attr_cache(self, attribute_name)
if attr_obj:
del_attr_cache(self, attribute_name)
attr_obj.delete()
else:
try:
_GA(self, "_attribute_class").objects.filter(
db_obj=self, db_key__iexact=attribute_name)[0].delete()
except IndexError:
pass
def del_attribute_raise(self, attribute_name):
"""
Removes and attribute. Raises AttributeError if
attribute is not found.
attribute_name: (str) The attribute's name.
"""
attr_obj = get_attr_cache(self, attribute_name)
if attr_obj:
del_attr_cache(self, attribute_name)
attr_obj.delete()
else:
try:
_GA(self, "_attribute_class").objects.filter(
db_obj=self, db_key__iexact=attribute_name)[0].delete()
except IndexError:
pass
raise AttributeError
def get_all_attributes(self):
"""
Returns all attributes defined on the object.
"""
return list(_GA(self,"_attribute_class").objects.filter(db_obj=self))
def attr(self, attribute_name=None, value=None, delete=False):
"""
This is a convenient wrapper for
get_attribute, set_attribute, del_attribute
and get_all_attributes.
If value is None, attr will act like
a getter, otherwise as a setter.
set delete=True to delete the named attribute.
Note that you cannot set the attribute
value to None using this method. Use set_attribute.
"""
        if attribute_name is None:
            # act as a list method
            return self.get_all_attributes()
        elif delete:
            self.del_attribute(attribute_name)
        elif value is None:
            # act as a getter.
            return self.get_attribute(attribute_name)
        else:
            # act as a setter
            self.set_attribute(attribute_name, value)
def secure_attr(self, accessing_object, attribute_name=None, value=None, delete=False,
default_access_read=True, default_access_edit=True, default_access_create=True):
"""
This is a version of attr that requires the accessing object
as input and will use that to check eventual access locks on
the Attribute before allowing any changes or reads.
        In cases where this method does not return an attribute value, it
        returns True for a successful operation and None otherwise.
locktypes checked on the Attribute itself:
attrread - control access to reading the attribute value
attredit - control edit/delete access
locktype checked on the object on which the Attribute is/will be stored:
attrcreate - control attribute create access (this is checked *on the object* not on the Attribute!)
default_access_* defines which access is assumed if no
        suitable lock is defined on the Attribute.
"""
        if attribute_name is None:
            # act as list method, but check access
            return [attr for attr in self.get_all_attributes()
                    if attr.access(accessing_object, "attrread", default=default_access_read)]
        elif delete:
            # act as deleter
            attr = self.get_attribute_obj(attribute_name)
            if attr and attr.access(accessing_object, "attredit", default=default_access_edit):
                self.del_attribute(attribute_name)
                return True
        elif value is None:
            # act as getter
            attr = self.get_attribute_obj(attribute_name)
            if attr and attr.access(accessing_object, "attrread", default=default_access_read):
                return attr.value
        else:
            # act as setter
            attr = self.get_attribute_obj(attribute_name)
            if attr:
                # attribute already exists
                if attr.access(accessing_object, "attredit", default=default_access_edit):
                    self.set_attribute(attribute_name, value)
                    return True
            else:
                # creating a new attribute - check access on storing object!
                if self.access(accessing_object, "attrcreate", default=default_access_create):
                    self.set_attribute(attribute_name, value)
                    return True
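    # A minimal usage sketch of the access-checked API above (hypothetical
    # `obj` and `caller` game objects; not part of this module):
    #
    #     val = obj.secure_attr(caller, "gold")              # checks 'attrread'
    #     ok = obj.secure_attr(caller, "gold", 100)          # checks 'attredit',
    #                                                        # or 'attrcreate' if new
    #     ok = obj.secure_attr(caller, "gold", delete=True)  # checks 'attredit'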
#@property
def __db_get(self):
"""
A second convenience wrapper for the the attribute methods. It
allows for the syntax
obj.db.attrname = value
and
value = obj.db.attrname
and
del obj.db.attrname
and
all_attr = obj.db.all (unless there is no attribute named 'all', in which
case that will be returned instead).
"""
try:
return self._db_holder
except AttributeError:
class DbHolder(object):
"Holder for allowing property access of attributes"
def __init__(self, obj):
_SA(self, 'obj', obj)
def __getattribute__(self, attrname):
if attrname == 'all':
# we allow to overload our default .all
attr = _GA(self, 'obj').get_attribute("all")
if attr:
return attr
return _GA(self, 'all')
return _GA(self, 'obj').get_attribute(attrname)
def __setattr__(self, attrname, value):
_GA(self, 'obj').set_attribute(attrname, value)
def __delattr__(self, attrname):
_GA(self, 'obj').del_attribute(attrname)
def get_all(self):
return _GA(self, 'obj').get_all_attributes()
all = property(get_all)
self._db_holder = DbHolder(self)
return self._db_holder
#@db.setter
def __db_set(self, value):
"Stop accidentally replacing the db object"
string = "Cannot assign directly to db object! "
string += "Use db.attr=value instead."
raise Exception(string)
#@db.deleter
def __db_del(self):
"Stop accidental deletion."
raise Exception("Cannot delete the db object!")
db = property(__db_get, __db_set, __db_del)
#
# NON-PERSISTENT storage methods
#
def nattr(self, attribute_name=None, value=None, delete=False):
"""
        This is the equivalent of self.attr but for non-persistent
        stores. It will not raise an error but return None.
"""
        if attribute_name is None:
            # act as a list method
            if callable(self.ndb.all):
                return self.ndb.all()
            else:
                return [val for val in self.ndb.__dict__.keys()
                        if not val.startswith('_')]
        elif delete:
            if hasattr(self.ndb, attribute_name):
                _DA(_GA(self, "ndb"), attribute_name)
        elif value is None:
            # act as a getter.
            if hasattr(self.ndb, attribute_name):
                return _GA(_GA(self, "ndb"), attribute_name)
            else:
                return None
        else:
            # act as a setter
            _SA(self.ndb, attribute_name, value)
#@property
def __ndb_get(self):
"""
A non-persistent store (ndb: NonDataBase). Everything stored
to this is guaranteed to be cleared when a server is shutdown.
Syntax is same as for the _get_db_holder() method and
property, e.g. obj.ndb.attr = value etc.
"""
try:
return self._ndb_holder
except AttributeError:
class NdbHolder(object):
"Holder for storing non-persistent attributes."
def get_all(self):
return [val for val in self.__dict__.keys()
if not val.startswith('_')]
all = property(get_all)
def __getattribute__(self, key):
# return None if no matching attribute was found.
try:
return _GA(self, key)
except AttributeError:
return None
def __setattr__(self, key, value):
# hook the oob handler here
call_ndb_hooks(self, key, value)
_SA(self, key, value)
self._ndb_holder = NdbHolder()
return self._ndb_holder
#@ndb.setter
def __ndb_set(self, value):
"Stop accidentally replacing the db object"
string = "Cannot assign directly to ndb object! "
string = "Use ndb.attr=value instead."
raise Exception(string)
#@ndb.deleter
def __ndb_del(self):
"Stop accidental deletion."
raise Exception("Cannot delete the ndb object!")
ndb = property(__ndb_get, __ndb_set, __ndb_del)
#
# Lock / permission methods
#
def access(self, accessing_obj, access_type='read', default=False):
"""
Determines if another object has permission to access.
accessing_obj - object trying to access this one
access_type - type of access sought
default - what to return if no lock of access_type was found
"""
return self.locks.check(accessing_obj, access_type=access_type, default=default)
def has_perm(self, accessing_obj, access_type):
"Alias to access"
logger.log_depmsg("has_perm() is deprecated. Use access() instead.")
return self.access(accessing_obj, access_type)
def check_permstring(self, permstring):
"""
This explicitly checks if we hold particular permission without involving
any locks.
"""
if hasattr(self, "player"):
if self.player and self.player.is_superuser: return True
else:
if self.is_superuser: return True
if not permstring:
return False
perm = permstring.lower()
if perm in [p.lower() for p in self.permissions]:
# simplest case - we have a direct match
return True
if perm in _PERMISSION_HIERARCHY:
# check if we have a higher hierarchy position
ppos = _PERMISSION_HIERARCHY.index(perm)
return any(True for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)
if hperm in [p.lower() for p in self.permissions] and hpos > ppos)
return False
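    # Example of the hierarchy check above (a sketch; the real
    # _PERMISSION_HIERARCHY tuple is defined elsewhere in this module and may
    # differ): given a hierarchy ("players", "builders", "wizards"), an object
    # holding the "wizards" permission passes check_permstring("builders"),
    # since "wizards" sits higher in the tuple.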
def flush_from_cache(self):
"""
Flush this object instance from cache, forcing an object reload. Note that this
will kill all temporary attributes on this object since it will be recreated
as a new Typeclass instance.
"""
self.__class__.flush_cached_instance(self)
|
bsd-3-clause
|
maloep/romcollectionbrowser
|
resources/lib/xbmc.py
|
1
|
35342
|
## @package xbmc
# Various classes and functions to interact with XBMC.
#
"""
Various classes and functions to interact with Kodi.
"""
from builtins import str
from builtins import object
import os
import xbmcgui as _xbmcgui
_loglevel = 1
_settings = {'external_filemanaging': 'true'}
_filename = 'dummy.log'
_logentry = 0
CAPTURE_FLAG_CONTINUOUS = 1
CAPTURE_FLAG_IMMEDIATELY = 2
CAPTURE_STATE_DONE = 3
CAPTURE_STATE_FAILED = 4
CAPTURE_STATE_WORKING = 0
DRIVE_NOT_READY = 1
ENGLISH_NAME = 2
ISO_639_1 = 0
ISO_639_2 = 1
LOGDEBUG = 0
LOGERROR = 4
LOGFATAL = 6
LOGINFO = 1
LOGNONE = 7
LOGNOTICE = 2
LOGSEVERE = 5
LOGWARNING = 3
PLAYER_CORE_AUTO = 0
PLAYER_CORE_DVDPLAYER = 1
PLAYER_CORE_MPLAYER = 2
PLAYER_CORE_PAPLAYER = 3
PLAYLIST_MUSIC = 0
PLAYLIST_VIDEO = 1
SERVER_AIRPLAYSERVER = 2
SERVER_EVENTSERVER = 6
SERVER_JSONRPCSERVER = 3
SERVER_UPNPRENDERER = 4
SERVER_UPNPSERVER = 5
SERVER_WEBSERVER = 1
SERVER_ZEROCONF = 7
TRAY_CLOSED_MEDIA_PRESENT = 96
TRAY_CLOSED_NO_MEDIA = 64
TRAY_OPEN = 16
__author__ = 'Team Kodi <http://kodi.tv>'
__credits__ = 'Team Kodi'
__date__ = 'Fri May 01 16:22:03 BST 2015'
__platform__ = 'ALL'
__version__ = '2.20.0'
abortRequested = False
"""Returns ``True`` if Kodi prepares to close itself"""
def _write_to_file(msg):
    global _logentry
    filepath = os.path.join(os.getcwd(), _filename)
    mode = 'w' if _logentry == 0 else 'a'
    with open(filepath, mode) as fh:
        fh.write('%003d -- %s\n' % (_logentry, msg))
    _logentry += 1
class Keyboard(object):
"""
Creates a new Keyboard object with default text heading and hidden input flag if supplied.
:param line: string - default text entry.
:param heading: string - keyboard heading.
:param hidden: boolean - True for hidden text entry.
Example::
kb = xbmc.Keyboard('default', 'heading', True)
kb.setDefault('password') # optional
kb.setHeading('Enter password') # optional
kb.setHiddenInput(True) # optional
kb.doModal()
if (kb.isConfirmed()):
text = kb.getText()
"""
def __init__(self, line='', heading='', hidden=False):
"""
Creates a new Keyboard object with default text heading and hidden input flag if supplied.
line: string - default text entry.
heading: string - keyboard heading.
hidden: boolean - True for hidden text entry.
Example:
kb = xbmc.Keyboard('default', 'heading', True)
kb.setDefault('password') # optional
kb.setHeading('Enter password') # optional
kb.setHiddenInput(True) # optional
kb.doModal()
if (kb.isConfirmed()):
text = kb.getText()
"""
pass
def doModal(self, autoclose=0):
"""Show keyboard and wait for user action.
:param autoclose: integer - milliseconds to autoclose dialog.
.. note::
autoclose = 0 - This disables autoclose
Example::
kb.doModal(30000)
"""
pass
def setDefault(self, line=''):
"""Set the default text entry.
:param line: string - default text entry.
Example::
kb.setDefault('password')
"""
pass
def setHiddenInput(self, hidden=False):
"""Allows hidden text entry.
:param hidden: boolean - ``True`` for hidden text entry.
Example::
kb.setHiddenInput(True)
"""
pass
def setHeading(self, heading):
"""Set the keyboard heading.
:param heading: string - keyboard heading.
Example::
kb.setHeading('Enter password')
"""
pass
def getText(self):
"""Returns the user input as a string.
:return: entered text
.. note::
This will always return the text entry even if you cancel the keyboard.
Use the isConfirmed() method to check if user cancelled the keyboard.
"""
return str()
def isConfirmed(self):
"""Returns ``False`` if the user cancelled the input.
:return: confirmed status
example::
if (kb.isConfirmed()):
pass
"""
return bool(1)
class Player(object):
"""
Player()
    Creates a new Player, with the xbmc music playlist as the default.
.. note:: currently Player class constructor does not take any parameters.
Kodi automatically selects a necessary player.
"""
def __init__(self):
"""
        Creates a new Player, with the xbmc music playlist as the default.
"""
pass
    def play(self, item=None, listitem=None, windowed=False, startpos=-1):
"""
Play this item.
:param item: [opt] string - filename, url or playlist.
:param listitem: [opt] listitem - used with setInfo() to set different infolabels.
:param windowed: [opt] bool - true=play video windowed, false=play users preference.(default)
:param startpos: [opt] int - starting position when playing a playlist. Default = -1
.. note:: If item is not given then the Player will try to play the current item
in the current playlist.
You can use the above as keywords for arguments and skip certain optional arguments.
Once you use a keyword, all following arguments require the keyword.
example::
listitem = xbmcgui.ListItem('Ironman')
listitem.setInfo('video', {'Title': 'Ironman', 'Genre': 'Science Fiction'})
xbmc.Player().play(url, listitem, windowed)
xbmc.Player().play(playlist, listitem, windowed, startpos)
"""
pass
def stop(self):
"""Stop playing."""
pass
def pause(self):
"""Pause or resume playing if already paused."""
pass
def playnext(self):
"""Play next item in playlist."""
pass
def playprevious(self):
"""Play previous item in playlist."""
pass
def playselected(self, selected):
"""Play a certain item from the current playlist."""
pass
def onPlayBackStarted(self):
"""Will be called when xbmc starts playing a file."""
pass
def onPlayBackEnded(self):
"""Will be called when xbmc stops playing a file."""
pass
def onPlayBackStopped(self):
"""Will be called when user stops xbmc playing a file."""
def onPlayBackPaused(self):
"""Will be called when user pauses a playing file."""
pass
def onPlayBackResumed(self):
"""Will be called when user resumes a paused file."""
pass
def onPlayBackSeek(self, time, seekOffset):
"""
onPlayBackSeek method.
:param time: integer - time to seek to.
:param seekOffset: integer - ?.
Will be called when user seeks to a time
"""
pass
def onPlayBackSeekChapter(self, chapter):
"""
onPlayBackSeekChapter method.
:param chapter: integer - chapter to seek to.
Will be called when user performs a chapter seek
"""
pass
def onPlayBackSpeedChanged(self, speed):
"""
onPlayBackSpeedChanged(speed) -- onPlayBackSpeedChanged method.
:param speed: integer - current speed of player.
.. note:: negative speed means player is rewinding, 1 is normal playback speed.
Will be called when players speed changes. (eg. user FF/RW)
"""
pass
def onQueueNextItem(self):
"""
onQueueNextItem method.
Will be called when player requests next item
"""
pass
def isPlaying(self):
"""Returns ``True`` is xbmc is playing a file."""
return bool(1)
def isPlayingAudio(self):
"""Returns ``True`` is xbmc is playing an audio file."""
return bool(1)
def isPlayingVideo(self):
"""Returns ``True`` if xbmc is playing a video."""
return bool(1)
def getPlayingFile(self):
"""
returns the current playing file as a string.
.. note:: For LiveTV, returns a pvr:// url which is not translatable to an OS specific file or external url
:raises: Exception, if player is not playing a file.
"""
return str()
def getVideoInfoTag(self):
"""Returns the VideoInfoTag of the current playing Movie.
:raises: Exception: If player is not playing a file or current file is not a movie file.
.. note:: This doesn't work yet, it's not tested.
"""
return InfoTagVideo()
def getMusicInfoTag(self):
"""Returns the MusicInfoTag of the current playing 'Song'.
:raises: Exception: If player is not playing a file or current file is not a music file.
"""
return InfoTagMusic()
def getTotalTime(self):
"""Returns the total time of the current playing media in seconds.
This is only accurate to the full second.
:raises: Exception: If player is not playing a file.
"""
return float()
def getTime(self):
"""Returns the current time of the current playing media as fractional seconds.
:raises: Exception: If player is not playing a file.
"""
return float()
def seekTime(self, pTime):
"""Seeks the specified amount of time as fractional seconds.
The time specified is relative to the beginning of the currently playing media file.
:raises: Exception: If player is not playing a file.
"""
pass
def setSubtitles(self, subtitleFile):
"""Set subtitle file and enable subtitles.
:param subtitleFile: string or unicode - Path to subtitle.
Example::
setSubtitles('/path/to/subtitle/test.srt')
"""
pass
def getSubtitles(self):
"""Get subtitle stream name."""
return str()
def getAvailableAudioStreams(self):
"""Get audio stream names."""
return list()
def getAvailableSubtitleStreams(self):
"""
get Subtitle stream names
"""
return list()
def setAudioStream(self, iStream):
"""Set audio stream.
:param iStream: int
"""
pass
def setSubtitleStream(self, iStream):
"""
set Subtitle Stream
:param iStream: int
example::
setSubtitleStream(1)
"""
pass
def showSubtitles(self, bVisible):
"""
enable/disable subtitles
:param bVisible: boolean - ``True`` for visible subtitles.
example::
xbmc.Player().showSubtitles(True)
"""
pass
class PlayList(object):
"""Retrieve a reference from a valid xbmc playlist
:param playlist: int - can be one of the next values:
::
0: xbmc.PLAYLIST_MUSIC
1: xbmc.PLAYLIST_VIDEO
Use PlayList[int position] or __getitem__(int position) to get a PlayListItem.
"""
def __init__(self, playList):
"""Retrieve a reference from a valid xbmc playlist
playlist: int - can be one of the next values:
::
0: xbmc.PLAYLIST_MUSIC
1: xbmc.PLAYLIST_VIDEO
Use PlayList[int position] or __getitem__(int position) to get a PlayListItem.
"""
pass
def __getitem__(self, item):
"""x.__getitem__(y) <==> x[y]"""
return _xbmcgui.ListItem()
def __len__(self):
"""x.__len__() <==> len(x)"""
return int()
def add(self, url, listitem=None, index=-1):
"""Adds a new file to the playlist.
:param url: string or unicode - filename or url to add.
:param listitem: listitem - used with setInfo() to set different infolabels.
:param index: integer - position to add playlist item.
Example::
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
video = 'F:\\movies\\Ironman.mov'
listitem = xbmcgui.ListItem('Ironman', thumbnailImage='F:\\movies\\Ironman.tbn')
listitem.setInfo('video', {'Title': 'Ironman', 'Genre': 'Science Fiction'})
playlist.add(url=video, listitem=listitem, index=7)
"""
pass
def load(self, filename):
"""Load a playlist.
Clear current playlist and copy items from the file to this Playlist.
filename can be like .pls or .m3u ...
:param filename:
:return: ``False`` if unable to load playlist, True otherwise.
"""
return bool(1)
def remove(self, filename):
"""Remove an item with this filename from the playlist.
:param filename:
"""
pass
def clear(self):
"""Clear all items in the playlist."""
pass
def shuffle(self):
"""Shuffle the playlist."""
pass
def unshuffle(self):
"""Unshuffle the playlist."""
pass
def size(self):
"""Returns the total number of PlayListItems in this playlist."""
return int()
def getposition(self):
"""Returns the position of the current song in this playlist."""
return int()
def getPlayListId(self):
"""getPlayListId() --returns an integer."""
return int()
class PlayListItem(object):
"""Creates a new PlaylistItem which can be added to a PlayList."""
def getdescription(self):
"""Returns the description of this PlayListItem."""
return str()
def getduration(self):
"""Returns the duration of this PlayListItem."""
return int()
def getfilename(self):
"""Returns the filename of this PlayListItem."""
return str()
class InfoTagMusic(object):
"""InfoTagMusic class"""
def getURL(self):
"""Returns a string."""
return str()
def getTitle(self):
"""Returns a string."""
return str()
def getArtist(self):
"""Returns a string."""
return str()
def getAlbumArtist(self):
"""Returns a string."""
return str()
def getAlbum(self):
"""Returns a string."""
return str()
def getGenre(self):
"""Returns a string."""
return str()
def getDuration(self):
"""Returns an integer."""
return int()
def getTrack(self):
"""Returns an integer."""
return int()
def getDisc(self):
"""Returns an integer."""
return int()
def getTrackAndDisc(self):
"""Returns an integer."""
return int()
def getReleaseDate(self):
"""Returns a string."""
return str()
def getListeners(self):
"""Returns an integer."""
return int()
def getPlayCount(self):
"""Returns an integer."""
return int()
def getLastPlayed(self):
"""Returns a string."""
return str()
def getComment(self):
"""Returns a string."""
return str()
def getLyrics(self):
"""Returns a string."""
return str()
class InfoTagVideo(object):
"""InfoTagVideo class"""
def getDirector(self):
"""Returns a string."""
return str()
def getWritingCredits(self):
"""Returns a string."""
return str()
def getGenre(self):
"""Returns a string."""
return str()
def getTagLine(self):
"""Returns a string."""
return str()
def getPlotOutline(self):
"""Returns a string."""
return str()
def getPlot(self):
"""Returns a string."""
return str()
def getPictureURL(self):
"""Returns a string."""
return str()
def getTitle(self):
"""Returns a string."""
return str()
def getOriginalTitle(self):
"""Returns a string."""
return str()
def getVotes(self):
"""Returns a string."""
return str()
def getCast(self):
"""Returns a string."""
return str()
def getFile(self):
"""Returns a string."""
return str()
def getPath(self):
"""Returns a string."""
return str()
def getIMDBNumber(self):
"""Returns a string."""
return str()
def getYear(self):
"""Returns an integer."""
return int()
def getPremiered(self):
"""Returns a string."""
return str()
def getFirstAired(self):
"""Returns a string."""
return str()
def getRating(self):
"""Returns a float."""
return float()
def getPlayCount(self):
"""Returns an integer."""
return int()
def getLastPlayed(self):
"""Returns a string."""
return str()
def getTVShowTitle(self):
"""Returns a string."""
return str()
def getMediaType(self):
"""Returns a string."""
return str()
def getSeason(self):
"""Returns an int."""
return int()
def getEpisode(self):
"""Returns an int."""
return int()
class Monitor(object):
"""
Monitor class.
Creates a new Monitor to notify addon about changes.
"""
def onScreensaverActivated(self):
"""
onScreensaverActivated method.
Will be called when screensaver kicks in
"""
pass
def onScreensaverDeactivated(self):
"""
onScreensaverDeactivated method.
Will be called when screensaver goes off
"""
pass
def onSettingsChanged(self):
"""
onSettingsChanged method.
Will be called when addon settings are changed
"""
pass
def onNotification(self, sender, method, data):
"""
onNotification method.
:param sender: str - sender of the notification
:param method: str - name of the notification
:param data: str - JSON-encoded data of the notification
Will be called when Kodi receives or sends a notification
"""
pass
def onCleanStarted(self, library):
"""
onCleanStarted method.
:param library: video/music as string
Will be called when library clean has started
and return video or music to indicate which library is being cleaned
"""
pass
def onCleanFinished(self, library):
"""
onCleanFinished method.
:param library: video/music as string
Will be called when library clean has ended
and return video or music to indicate which library has been cleaned
"""
pass
def onDPMSActivated(self):
"""
onDPMSActivated method.
Will be called when energysaving/DPMS gets active
"""
pass
def onDPMSDeactivated(self):
"""
onDPMSDeactivated method.
Will be called when energysaving/DPMS is turned off
"""
pass
def onScanFinished(self, library):
"""
onScanFinished method.
:param library: video/music as string
Will be called when library scan has ended
and return video or music to indicate which library has been scanned
"""
pass
def onScanStarted(self, library):
"""
onScanStarted method.
:param library: video/music as string
Will be called when library scan has started
and return video or music to indicate which library is being scanned
"""
pass
def waitForAbort(self, timeout=-1):
"""
Block until abort is requested, or until timeout occurs.
        If an abort request has already been made, this returns immediately.
        Returns ``True`` when an abort has been requested,
``False`` if a timeout is given and the operation times out.
:param timeout: float - (optional) timeout in seconds. Default: no timeout.
:return: bool
"""
return bool(0)
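    # A minimal service-loop sketch using this method (the usual idiom for
    # long-running Kodi addon services):
    #
    #     monitor = xbmc.Monitor()
    #     while not monitor.abortRequested():
    #         if monitor.waitForAbort(10):
    #             break  # abort was requested while sleeping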
def abortRequested(self):
"""
Returns ``True`` if abort has been requested.
"""
return bool(0)
class RenderCapture(object):
"""RenerCapture class"""
def capture(self, width, height, flags=0):
"""
Issue capture request.
:param width: Width capture image should be rendered to
        :param height: Height capture image should be rendered to
:param flags: Optional. Flags that control the capture processing.
The value for 'flags' could be or'ed from the following constants:
- ``xbmc.CAPTURE_FLAG_CONTINUOUS``: after a capture is done,
issue a new capture request immediately
        - ``xbmc.CAPTURE_FLAG_IMMEDIATELY``: read out immediately when capture() is called,
this can cause a busy wait
.. warning:: As of Kodi 17.x (Krypton) ``flags`` option will be deprecated.
"""
pass
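    # A minimal capture-workflow sketch (the 160x90 size is an arbitrary
    # example value):
    #
    #     rc = xbmc.RenderCapture()
    #     rc.capture(160, 90)
    #     image = rc.getImage(1000)   # bytearray of getWidth()*getHeight()*4 bytes
    #     fmt = rc.getImageFormat()   # 'BGRA' or 'RGBA'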
def getAspectRatio(self):
"""
:return: aspect ratio of currently displayed video as a float number.
"""
return float()
def getCaptureState(self):
"""
:return: processing state of capture request.
The returned value could be compared against the following constants::
- ``xbmc.CAPTURE_STATE_WORKING``: Capture request in progress.
        - ``xbmc.CAPTURE_STATE_DONE``: Capture request done. The image could be retrieved with getImage()
- ``xbmc.CAPTURE_STATE_FAILED``: Capture request failed.
.. warning:: Will be deprecated in Kodi 17.x (Krypton)
"""
return int()
def getHeight(self):
"""
:return: height of captured image.
"""
return int()
def getImage(self, msecs=0):
"""
Get image
:param msecs: wait time in msec
:return: captured image as a bytearray.
.. note:: ``msec`` param will be added in Kodi 17.x (Krypton).
        The size of the image is getWidth() * getHeight() * 4
"""
return bytearray()
def getImageFormat(self):
"""
:return: format of captured image: 'BGRA' or 'RGBA'.
        .. note:: As of Kodi 17.x (Krypton) 'BGRA' will always be returned
"""
return str()
def getWidth(self):
"""
:return: width of captured image.
"""
return int()
def waitForCaptureStateChangeEvent(self, msecs=0):
"""
wait for capture state change event
:param msecs: Milliseconds to wait. Waits forever if not specified.
The method will return ``1`` if the Event was triggered. Otherwise it will return ``0``.
"""
return int()
def audioResume():
"""
Resume Audio engine.
example::
xbmc.audioResume()
"""
pass
def audioSuspend():
"""
Suspend Audio engine.
example::
xbmc.audioSuspend()
"""
pass
def convertLanguage(language, format):
"""
Returns the given language converted to the given format as a string.
:param language: string either as name in English, two letter code (ISO 639-1),
or three letter code (ISO 639-2/T(B)
:param format: format of the returned language string:
- ``xbmc.ISO_639_1``: two letter code as defined in ISO 639-1
- ``xbmc.ISO_639_2``: three letter code as defined in ISO 639-2/T or ISO 639-2/B
- ``xbmc.ENGLISH_NAME``: full language name in English (default)
example::
language = xbmc.convertLanguage(English, xbmc.ISO_639_2)
"""
return str()
def enableNavSounds(yesNo):
"""
Enables/Disables nav sounds
:param yesNo: enable (``True``) or disable (``False``) nav sounds
example::
xbmc.enableNavSounds(True)
"""
pass
def executeJSONRPC(jsonrpccommand):
"""
Execute an JSONRPC command.
:param jsonrpccommand: string - jsonrpc command to execute.
List of commands: http://wiki.xbmc.org/?title=JSON-RPC_API
example::
response = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "JSONRPC.Introspect", "id": 1 }')
"""
return str()
def executebuiltin(function, wait=False):
"""
Execute a built in XBMC function.
:param function: string - builtin function to execute.
List of functions: http://wiki.xbmc.org/?title=List_of_Built_In_Functions
example::
xbmc.executebuiltin('XBMC.RunXBE(c:\avalaunch.xbe)')
"""
pass
def executescript(script):
"""
Execute a python script.
:param script: string - script filename to execute.
example::
xbmc.executescript('special://home/scripts/update.py')
"""
pass
def getCacheThumbName(path):
"""
Returns a thumb cache filename.
:param path: string or unicode -- path to file
Example::
thumb = xbmc.getCacheThumbName('f:\\videos\\movie.avi')
"""
return str()
def getCleanMovieTitle(path, usefoldername=False):
"""
Returns a clean movie title and year string if available.
:param path: string or unicode - String to clean
:param usefoldername: [opt] bool - use folder names (defaults to ``False``)
example::
title, year = xbmc.getCleanMovieTitle('/path/to/moviefolder/test.avi', True)
"""
return str(), str()
def getCondVisibility(condition):
"""
Returns ``True`` (``1``) or ``False`` (``0``) as a ``bool``.
:param condition: string - condition to check.
List of Conditions: http://wiki.xbmc.org/?title=List_of_Boolean_Conditions
.. note:: You can combine two (or more) of the above settings by using "+" as an ``AND`` operator,
"|" as an ``OR`` operator, "!" as a ``NOT`` operator, and "[" and "]" to bracket expressions.
example::
visible = xbmc.getCondVisibility('[Control.IsVisible(41) + !Control.IsVisible(12)]')
"""
return bool(1)
def getDVDState():
"""
Returns the dvd state as an integer.
return values are:
- 1 : ``xbmc.DRIVE_NOT_READY``
- 16 : ``xbmc.TRAY_OPEN``
- 64 : ``xbmc.TRAY_CLOSED_NO_MEDIA``
- 96 : ``xbmc.TRAY_CLOSED_MEDIA_PRESENT``
example::
dvdstate = xbmc.getDVDState()
"""
return int()
def getFreeMem():
"""
Returns the amount of free memory in MB as an integer.
example::
freemem = xbmc.getFreeMem()
"""
return int()
def getGlobalIdleTime():
"""
Returns the elapsed idle time in seconds as an integer.
example::
t = xbmc.getGlobalIdleTime()
"""
return int()
def getIPAddress():
"""
Returns the current ip address as a string.
example::
ip = xbmc.getIPAddress()
"""
return str()
def getInfoImage(infotag):
"""
Returns a filename including path to the InfoImage's thumbnail as a string.
:param infotag: string - infotag for value you want returned.
List of InfoTags: http://wiki.xbmc.org/?title=InfoLabels
example::
filename = xbmc.getInfoImage('Weather.Conditions')
"""
return str()
def getInfoLabel(cLine):
"""
Returns an InfoLabel as a string.
:param cLine: string - infoTag for value you want returned.
List of InfoTags: http://wiki.xbmc.org/?title=InfoLabels
example::
label = xbmc.getInfoLabel('Weather.Conditions')
"""
if(cLine == 'System.BuildVersion'):
return '17.6'
return str()
def getLanguage(format=ENGLISH_NAME, region=False):
"""
Returns the active language as a string.
:param format: [opt] format of the returned language string
- ``xbmc.ISO_639_1``: two letter code as defined in ISO 639-1
- ``xbmc.ISO_639_2``: three letter code as defined in ISO 639-2/T or ISO 639-2/B
- ``xbmc.ENGLISH_NAME``: full language name in English (default)
:param region: [opt] append the region delimited by "-" of the language (setting)
to the returned language string
example::
language = xbmc.getLanguage(xbmc.ENGLISH_NAME)
"""
return str()
def getLocalizedString(id):
"""
Returns a localized 'unicode string'.
:param id: integer -- id# for string you want to localize.
.. note:: See strings.po in language folders for which id you need for a string.
example::
locstr = xbmc.getLocalizedString(6)
"""
return str()
def getRegion(id):
"""
Returns your regions setting as a string for the specified id.
:param id: string - id of setting to return
.. note:: choices are (dateshort, datelong, time, meridiem, tempunit, speedunit)
You can use the above as keywords for arguments.
example::
date_long_format = xbmc.getRegion('datelong')
"""
return str()
def getSkinDir():
"""
Returns the active skin directory as a string.
.. note:: This is not the full path like ``'special://home/addons/skin.confluence'``,
but only ``'skin.confluence'``.
example::
skindir = xbmc.getSkinDir()
"""
return str()
def getSupportedMedia(mediaType):
"""
Returns the supported file types for the specific media as a string.
:param mediaType: string - media type
.. note:: media type can be (video, music, picture).
The return value is a pipe separated string of filetypes (eg. '.mov|.avi').
You can use the above as keywords for arguments.
example::
mTypes = xbmc.getSupportedMedia('video')
"""
return str()
def log(msg, level=LOGDEBUG):
"""
Write a string to XBMC's log file and the debug window.
:param msg: string - text to output.
    :param level: [opt] integer - log level to output at. (default: ``LOGDEBUG``)
.. note:: You can use the above as keywords for arguments and skip certain optional arguments.
Once you use a keyword, all following arguments require the keyword.
Text is written to the log for the following conditions.
- XBMC loglevel == -1 (NONE, nothing at all is logged)
- XBMC loglevel == 0 (NORMAL, shows LOGNOTICE, LOGERROR, LOGSEVERE and LOGFATAL) * XBMC loglevel == 1
(DEBUG, shows all)
See pydocs for valid values for level.
example::
xbmc.log('This is a test string.', level=xbmc.LOGDEBUG)
"""
global _loglevel
if _loglevel == -1:
return
elif _loglevel == 0:
if level == LOGNOTICE:
msg = '%s: %s' % (level, msg)
print(msg)
_write_to_file(msg)
else:
msg = '%s: %s' % (level, msg)
print(msg)
_write_to_file(msg)
def makeLegalFilename(filename, fatX=True):
"""
Returns a legal filename or path as a string.
:param filename: string or unicode -- filename/path to make legal
:param fatX: [opt] bool -- ``True`` = Xbox file system(Default)
    .. note:: If fatX is ``True`` you should pass a full path.
If fatX is ``False`` only pass the basename of the path.
You can use the above as keywords for arguments and skip certain optional arguments.
Once you use a keyword, all following arguments require the keyword.
Example::
        filename = xbmc.makeLegalFilename('F:\\Trailers\\Ice Age: The Meltdown.avi')
"""
return str()
def playSFX(filename, useCached=True):
"""
Plays a wav file by filename
:param filename: string - filename of the wav file to play.
:param useCached: [opt] bool - False = Dump any previously cached wav associated with filename
example::
xbmc.playSFX('special://xbmc/scripts/dingdong.wav')
xbmc.playSFX('special://xbmc/scripts/dingdong.wav',False)
"""
pass
def stopSFX():
"""
Stops wav file
example::
xbmc.stopSFX()
"""
pass
def restart():
"""
Restart the htpc.
example::
xbmc.restart()
"""
pass
def shutdown():
"""
Shutdown the htpc.
example::
xbmc.shutdown()
"""
pass
def skinHasImage(image):
"""
Returns ``True`` if the image file exists in the skin.
:param image: string - image filename
.. note:: If the media resides in a subfolder include it.
(eg. home-myfiles\home-myfiles2.png). You can use the above as keywords for arguments.
example::
exists = xbmc.skinHasImage('ButtonFocusedTexture.png')
"""
return bool(1)
def sleep(timemillis):
"""
Sleeps for 'time' msec.
:param timemillis: integer - number of msec to sleep.
    .. note:: This is useful if you have, for example, a Player class that is
        waiting for onPlayBackEnded() calls.
:raises: TypeError, if time is not an integer.
Example::
xbmc.sleep(2000) # sleeps for 2 seconds
"""
pass
def startServer(iTyp, bStart, bWait=False):
"""
start or stop a server.
:param iTyp: integer -- use SERVER_* constants
:param bStart: bool -- start (True) or stop (False) a server
:param bWait: [opt] bool -- wait on stop before returning (not supported by all servers)
:return: bool -- ``True`` or ``False``
Example::
xbmc.startServer(xbmc.SERVER_AIRPLAYSERVER, False)
"""
    return bool(1)
def translatePath(path):
"""
Returns the translated path.
:param path: string or unicode - Path to format
    .. note:: Only useful if you are coding for both Linux and Windows.
Converts ``'special://masterprofile/script_data'`` -> ``'/home/user/XBMC/UserData/script_data'`` on Linux.
Example::
fpath = xbmc.translatePath('special://masterprofile/script_data')
"""
b, t = os.path.split(path)
return os.path.join(os.getcwd(), t)
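# Note: unlike real Kodi, this stub does not resolve special:// paths; it just
# maps the basename of the input path into the current working directory so
# scripts can run outside Kodi, e.g.
#     xbmc.translatePath('special://masterprofile/script_data')
#     # -> os.path.join(os.getcwd(), 'script_data')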
def validatePath(path):
"""
Returns the validated path.
:param path: string or unicode - Path to format
.. note:: Only useful if you are coding for both Linux and Windows for fixing slash problems.
    e.g. Corrects 'Z://something' -> 'Z:\\something'
Example::
fpath = xbmc.validatePath(somepath)
"""
return str()
|
gpl-2.0
|
jaredly/pyjamas
|
pygtkweb/demos/047-treeviewdnd.py
|
13
|
4001
|
#!/usr/bin/env python
# example treeviewdnd.py
import pygtk
pygtk.require('2.0')
import gtk
class TreeViewDnDExample:
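    # Each target is a (name, flags, info) 3-tuple: `name` identifies the
    # drag data type, `flags` restricts where the target applies (here
    # gtk.TARGET_SAME_WIDGET limits row moves to this widget), and `info` is
    # an application-defined id handed back to the drag callbacks.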
TARGETS = [
('MY_TREE_MODEL_ROW', gtk.TARGET_SAME_WIDGET, 0),
('text/plain', 0, 1),
('TEXT', 0, 2),
('STRING', 0, 3),
]
# close the window and quit
def delete_event(self, widget, event, data=None):
gtk.main_quit()
return False
def clear_selected(self, button):
selection = self.treeview.get_selection()
model, iter = selection.get_selected()
if iter:
model.remove(iter)
return
def __init__(self):
# Create a new window
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("URL Cache")
self.window.set_size_request(200, 200)
self.window.connect("delete_event", self.delete_event)
self.scrolledwindow = gtk.ScrolledWindow()
self.vbox = gtk.VBox()
self.hbox = gtk.HButtonBox()
self.vbox.pack_start(self.scrolledwindow, True)
self.vbox.pack_start(self.hbox, False)
self.b0 = gtk.Button('Clear All')
self.b1 = gtk.Button('Clear Selected')
self.hbox.pack_start(self.b0)
self.hbox.pack_start(self.b1)
# create a liststore with one string column to use as the model
self.liststore = gtk.ListStore(str)
# create the TreeView using liststore
self.treeview = gtk.TreeView(self.liststore)
# create a CellRenderer to render the data
self.cell = gtk.CellRendererText()
# create the TreeViewColumns to display the data
self.tvcolumn = gtk.TreeViewColumn('URL', self.cell, text=0)
# add columns to treeview
self.treeview.append_column(self.tvcolumn)
self.b0.connect_object('clicked', gtk.ListStore.clear, self.liststore)
self.b1.connect('clicked', self.clear_selected)
# make treeview searchable
self.treeview.set_search_column(0)
# Allow sorting on the column
self.tvcolumn.set_sort_column_id(0)
        # Enable drag and drop of rows, including row moves
self.treeview.enable_model_drag_source( gtk.gdk.BUTTON1_MASK,
self.TARGETS,
gtk.gdk.ACTION_DEFAULT|
gtk.gdk.ACTION_MOVE)
self.treeview.enable_model_drag_dest(self.TARGETS,
gtk.gdk.ACTION_DEFAULT)
self.treeview.connect("drag_data_get", self.drag_data_get_data)
self.treeview.connect("drag_data_received",
self.drag_data_received_data)
self.scrolledwindow.add(self.treeview)
self.window.add(self.vbox)
self.window.show_all()
def drag_data_get_data(self, treeview, context, selection, target_id,
etime):
treeselection = treeview.get_selection()
model, iter = treeselection.get_selected()
data = model.get_value(iter, 0)
selection.set(selection.target, 8, data)
def drag_data_received_data(self, treeview, context, x, y, selection,
info, etime):
model = treeview.get_model()
data = selection.data
drop_info = treeview.get_dest_row_at_pos(x, y)
if drop_info:
path, position = drop_info
iter = model.get_iter(path)
if (position == gtk.TREE_VIEW_DROP_BEFORE
or position == gtk.TREE_VIEW_DROP_INTO_OR_BEFORE):
model.insert_before(iter, [data])
else:
model.insert_after(iter, [data])
else:
model.append([data])
if context.action == gtk.gdk.ACTION_MOVE:
context.finish(True, True, etime)
return
def main():
gtk.main()
if __name__ == "__main__":
treeviewdndex = TreeViewDnDExample()
main()
|
apache-2.0
|
dhorelik/django-cms
|
cms/admin/forms.py
|
35
|
30651
|
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth import get_user_model, get_permission_codename
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models.fields import BooleanField
try:
from django.forms.utils import ErrorList
except ImportError:
from django.forms.util import ErrorList
from django.forms.widgets import HiddenInput
from django.template.defaultfilters import slugify
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _, get_language
from cms.apphook_pool import apphook_pool
from cms.constants import PAGE_TYPES_ID
from cms.forms.widgets import UserSelectAdminWidget, AppHookSelect, ApplicationConfigSelect
from cms.models import (Page, PagePermission, PageUser, ACCESS_PAGE, PageUserGroup, Title,
EmptyTitle, GlobalPagePermission)
from cms.utils.compat.forms import UserCreationForm
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_tuple
from cms.utils.mail import mail_page_user_change
from cms.utils.page import is_valid_page_slug
from cms.utils.page_resolver import is_valid_url
from cms.utils.permissions import (get_current_user, get_subordinate_users,
get_subordinate_groups,
get_user_permission_level)
from menus.menu_pool import menu_pool
def get_permission_accessor(obj):
User = get_user_model()
if isinstance(obj, (PageUser, User,)):
rel_name = 'user_permissions'
else:
rel_name = 'permissions'
return getattr(obj, rel_name)
def save_permissions(data, obj):
models = (
(Page, 'page'),
(PageUser, 'pageuser'),
(PageUserGroup, 'pageuser'),
(PagePermission, 'pagepermission'),
)
if not obj.pk:
        # save obj, otherwise we can't assign permissions to it
obj.save()
permission_accessor = get_permission_accessor(obj)
for model, name in models:
content_type = ContentType.objects.get_for_model(model)
for key in ('add', 'change', 'delete'):
# add permission `key` for model `model`
codename = get_permission_codename(key, model._meta)
permission = Permission.objects.get(content_type=content_type, codename=codename)
if data.get('can_%s_%s' % (key, name), None):
permission_accessor.add(permission)
else:
permission_accessor.remove(permission)
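# A minimal sketch of how save_permissions() is driven (hypothetical `data`
# dict and `user` object; the keys mirror the 'can_<action>_<model>' fields
# built by the permission forms below):
#
#     data = {'can_add_page': True, 'can_change_page': True,
#             'can_delete_page': False}
#     save_permissions(data, user)  # grants/revokes the matching Permissions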
class PageForm(forms.ModelForm):
language = forms.ChoiceField(label=_("Language"), choices=get_language_tuple(),
help_text=_('The current language of the content fields.'))
page_type = forms.ChoiceField(label=_("Page type"), required=False)
title = forms.CharField(label=_("Title"), widget=forms.TextInput(),
help_text=_('The default title'))
slug = forms.CharField(label=_("Slug"), widget=forms.TextInput(),
help_text=_('The part of the title that is used in the URL'))
menu_title = forms.CharField(label=_("Menu Title"), widget=forms.TextInput(),
help_text=_('Overwrite what is displayed in the menu'), required=False)
page_title = forms.CharField(label=_("Page Title"), widget=forms.TextInput(),
help_text=_('Overwrites what is displayed at the top of your browser or in bookmarks'),
required=False)
meta_description = forms.CharField(label=_('Description meta tag'), required=False,
widget=forms.Textarea(attrs={'maxlength': '155', 'rows': '4'}),
help_text=_('A description of the page used by search engines.'),
max_length=155)
class Meta:
model = Page
fields = ["parent", "site", 'template']
def __init__(self, *args, **kwargs):
super(PageForm, self).__init__(*args, **kwargs)
self.fields['parent'].widget = HiddenInput()
self.fields['site'].widget = HiddenInput()
self.fields['template'].widget = HiddenInput()
self.fields['language'].widget = HiddenInput()
if not self.fields['site'].initial:
self.fields['site'].initial = Site.objects.get_current().pk
site_id = self.fields['site'].initial
languages = get_language_tuple(site_id)
self.fields['language'].choices = languages
if not self.fields['language'].initial:
self.fields['language'].initial = get_language()
if 'page_type' in self.fields:
try:
type_root = Page.objects.get(publisher_is_draft=True, reverse_id=PAGE_TYPES_ID, site=site_id)
except Page.DoesNotExist:
type_root = None
if type_root:
language = self.fields['language'].initial
type_ids = type_root.get_descendants().values_list('pk', flat=True)
titles = Title.objects.filter(page__in=type_ids, language=language)
choices = [('', '----')]
for title in titles:
choices.append((title.page_id, title.title))
self.fields['page_type'].choices = choices
def clean(self):
cleaned_data = self.cleaned_data
slug = cleaned_data.get('slug', '')
page = self.instance
lang = cleaned_data.get('language', None)
        # No language given; cannot go further, but validation has already failed
if not lang:
return cleaned_data
parent = cleaned_data.get('parent', None)
site = self.cleaned_data.get('site', Site.objects.get_current())
if parent and parent.site != site:
raise ValidationError("Site doesn't match the parent's page site")
if site and not is_valid_page_slug(page, parent, lang, slug, site):
self._errors['slug'] = ErrorList([_('Another page with this slug already exists')])
del cleaned_data['slug']
if self.instance and page.title_set.count():
            # Checking the titles attached to the page makes sense only because
            # AdminFormsTests.test_clean_overwrite_url validates the form even
            # when no page instance is available.
            # Looks like just a theoretical corner case.
title = page.get_title_obj(lang, fallback=False)
if title and not isinstance(title, EmptyTitle) and slug:
oldslug = title.slug
title.slug = slug
title.save()
try:
is_valid_url(title.path, page)
except ValidationError as exc:
title.slug = oldslug
title.save()
if 'slug' in cleaned_data:
del cleaned_data['slug']
if hasattr(exc, 'messages'):
errors = exc.messages
else:
errors = [force_text(exc.message)]
self._errors['slug'] = ErrorList(errors)
return cleaned_data
def clean_slug(self):
slug = slugify(self.cleaned_data['slug'])
if not slug:
raise ValidationError(_("Slug must not be empty."))
return slug
class PublicationDatesForm(forms.ModelForm):
language = forms.ChoiceField(label=_("Language"), choices=get_language_tuple(),
help_text=_('The current language of the content fields.'))
def __init__(self, *args, **kwargs):
# Dates are not language dependent, so let's just fake the language to
# make the ModelAdmin happy
super(PublicationDatesForm, self).__init__(*args, **kwargs)
self.fields['language'].widget = HiddenInput()
self.fields['site'].widget = HiddenInput()
site_id = self.fields['site'].initial
languages = get_language_tuple(site_id)
self.fields['language'].choices = languages
if not self.fields['language'].initial:
self.fields['language'].initial = get_language()
class Meta:
model = Page
fields = ['site', 'publication_date', 'publication_end_date']
class AdvancedSettingsForm(forms.ModelForm):
from cms.forms.fields import PageSmartLinkField
application_urls = forms.ChoiceField(label=_('Application'),
choices=(), required=False,
help_text=_('Hook application to this page.'))
overwrite_url = forms.CharField(label=_('Overwrite URL'), max_length=255, required=False,
help_text=_('Keep this field empty if standard path should be used.'))
xframe_options = forms.ChoiceField(
choices=Page._meta.get_field('xframe_options').choices,
label=_('X Frame Options'),
help_text=_('Whether this page can be embedded in other pages or websites'),
initial=Page._meta.get_field('xframe_options').default,
required=False
)
redirect = PageSmartLinkField(label=_('Redirect'), required=False,
help_text=_('Redirects to this URL.'),
placeholder_text=_('Start typing...'),
ajax_view='admin:cms_page_get_published_pagelist'
)
language = forms.ChoiceField(label=_("Language"), choices=get_language_tuple(),
help_text=_('The current language of the content fields.'))
# This is really a 'fake' field which does not correspond to any Page attribute
# But creates a stub field to be populate by js
application_configs = forms.ChoiceField(label=_('Application configurations'),
choices=(), required=False,)
fieldsets = (
(None, {
'fields': ('overwrite_url', 'redirect'),
}),
(_('Language independent options'), {
'fields': ('site', 'template', 'reverse_id', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', 'application_configs',
'xframe_options',)
})
)
def __init__(self, *args, **kwargs):
super(AdvancedSettingsForm, self).__init__(*args, **kwargs)
self.fields['language'].widget = HiddenInput()
self.fields['site'].widget = HiddenInput()
site_id = self.fields['site'].initial
languages = get_language_tuple(site_id)
self.fields['language'].choices = languages
if not self.fields['language'].initial:
self.fields['language'].initial = get_language()
if 'navigation_extenders' in self.fields:
self.fields['navigation_extenders'].widget = forms.Select(
{}, [('', "---------")] + menu_pool.get_menus_by_attribute(
"cms_enabled", True))
if 'application_urls' in self.fields:
# Prepare a dict mapping the apps by class name ('PollApp') to
# their app_name attribute ('polls'), if any.
app_namespaces = {}
app_configs = {}
for hook in apphook_pool.get_apphooks():
app = apphook_pool.get_apphook(hook[0])
if app.app_name:
app_namespaces[hook[0]] = app.app_name
if app.app_config:
app_configs[hook[0]] = app
self.fields['application_urls'].widget = AppHookSelect(
attrs={'id': 'application_urls'},
app_namespaces=app_namespaces
)
self.fields['application_urls'].choices = [('', "---------")] + apphook_pool.get_apphooks()
page_data = self.data if self.data else self.initial
if app_configs:
self.fields['application_configs'].widget = ApplicationConfigSelect(
attrs={'id': 'application_configs'},
app_configs=app_configs)
if page_data.get('application_urls', False) and page_data['application_urls'] in app_configs:
self.fields['application_configs'].choices = [(config.pk, force_text(config)) for config in app_configs[page_data['application_urls']].get_configs()]
apphook = page_data.get('application_urls', False)
try:
config = apphook_pool.get_apphook(apphook).get_configs().get(namespace=self.initial['application_namespace'])
self.fields['application_configs'].initial = config.pk
except ObjectDoesNotExist:
# Provided apphook configuration doesn't exist (anymore),
# just skip it
# The user will choose another value anyway
pass
else:
# If app_config apphook is not selected, drop any value
# for application_configs to avoid the field data from
# being validated by the field itself
try:
del self.data['application_configs']
except KeyError:
pass
if 'redirect' in self.fields:
self.fields['redirect'].widget.language = self.fields['language'].initial
def _check_unique_namespace_instance(self, namespace):
return Page.objects.filter(
publisher_is_draft=True,
application_namespace=namespace
).exclude(pk=self.instance.pk).exists()
def clean(self):
cleaned_data = super(AdvancedSettingsForm, self).clean()
if 'reverse_id' in self.fields:
id = cleaned_data['reverse_id']
site_id = cleaned_data['site']
if id:
if Page.objects.filter(reverse_id=id, site=site_id, publisher_is_draft=True).exclude(
pk=self.instance.pk).count():
self._errors['reverse_id'] = self.error_class(
[_('A page with this reverse URL id exists already.')])
apphook = cleaned_data.get('application_urls', None)
# The field 'application_namespace' is a misnomer. It should be
# 'instance_namespace'.
instance_namespace = cleaned_data.get('application_namespace', None)
application_config = cleaned_data.get('application_configs', None)
if apphook:
# application_config wins over application_namespace
if application_config:
# the value of the application config namespace is saved in
# the 'usual' namespace field to be backward compatible
# with existing apphooks
config = apphook_pool.get_apphook(apphook).get_configs().get(pk=int(application_config))
if self._check_unique_namespace_instance(config.namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_configs'] = ErrorList([
_('An application instance using this configuration already exists.')
])
else:
self.cleaned_data['application_namespace'] = config.namespace
else:
if instance_namespace:
if self._check_unique_namespace_instance(instance_namespace):
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# The attribute on the apps 'app_name' is a misnomer, it should be
# 'application_namespace'.
application_namespace = apphook_pool.get_apphook(apphook).app_name
if application_namespace and not instance_namespace:
if self._check_unique_namespace_instance(application_namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# OK, there are zero instances of THIS app that use the
# default instance namespace, so, since the user didn't
# provide one, we'll use the default. NOTE: The following
# line is really setting the "instance namespace" of the
# new app to the app’s "application namespace", which is
# the default instance namespace.
self.cleaned_data['application_namespace'] = application_namespace
if instance_namespace and not apphook:
self.cleaned_data['application_namespace'] = None
if application_config and not apphook:
self.cleaned_data['application_configs'] = None
return self.cleaned_data
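    # Precedence implemented above for the saved 'application_namespace':
    # an explicit application config wins over a manually entered instance
    # namespace, which in turn wins over the apphook's default app_name.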
def clean_xframe_options(self):
if 'xframe_options' not in self.fields:
return # nothing to do, field isn't present
xframe_options = self.cleaned_data['xframe_options']
if xframe_options == '':
return Page._meta.get_field('xframe_options').default
return xframe_options
def clean_overwrite_url(self):
if 'overwrite_url' in self.fields:
url = self.cleaned_data['overwrite_url']
is_valid_url(url, self.instance)
return url
class Meta:
model = Page
fields = [
'site', 'template', 'reverse_id', 'overwrite_url', 'redirect', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', "xframe_options",
]
class PagePermissionForm(forms.ModelForm):
class Meta:
model = Page
fields = ['login_required', 'limit_visibility_in_menu']
class PagePermissionInlineAdminForm(forms.ModelForm):
"""
Page permission inline admin form used in inline admin. Required, because
user and group queryset must be changed. User can see only users on the same
level or under him in choosen page tree, and users which were created by him,
but aren't assigned to higher page level than current user.
"""
page = forms.ModelChoiceField(Page.objects.all(), label=_('user'), widget=HiddenInput(), required=True)
def __init__(self, *args, **kwargs):
super(PagePermissionInlineAdminForm, self).__init__(*args, **kwargs)
user = get_current_user() # current user from threadlocals
sub_users = get_subordinate_users(user)
limit_choices = True
use_raw_id = False
# Unfortunately, if there are > 500 users in the system, non-superusers
# won't see any benefit here because if we ask Django to put all the
# user PKs in limit_choices_to in the query string of the popup we're
# in danger of causing 414 errors so we fall back to the normal input
# widget.
if get_cms_setting('RAW_ID_USERS'):
if sub_users.count() < 500:
# If there aren't too many users, proceed as normal and use a
# raw id field with limit_choices_to
limit_choices = True
use_raw_id = True
elif get_user_permission_level(user) == 0:
# If there are enough choices to possibly cause a 414 request
# URI too large error, we only proceed with the raw id field if
# the user is a superuser & thus can legitimately circumvent
# the limit_choices_to condition.
limit_choices = False
use_raw_id = True
# We don't use the fancy custom widget if the admin form wants to use a
# raw id field for the user
if use_raw_id:
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
# This check will be False if the number of users in the system
# is less than the threshold set by the RAW_ID_USERS setting.
if isinstance(self.fields['user'].widget, ForeignKeyRawIdWidget):
# We can't set a queryset on a raw id lookup, but we can use
# the fact that it respects the limit_choices_to parameter.
if limit_choices:
self.fields['user'].widget.rel.limit_choices_to = dict(
id__in=list(sub_users.values_list('pk', flat=True))
)
else:
self.fields['user'].widget = UserSelectAdminWidget()
self.fields['user'].queryset = sub_users
self.fields['user'].widget.user = user # assign current user
self.fields['group'].queryset = get_subordinate_groups(user)
def clean(self):
super(PagePermissionInlineAdminForm, self).clean()
for field in self.Meta.model._meta.fields:
if not isinstance(field, BooleanField) or not field.name.startswith('can_'):
continue
name = field.name
self.cleaned_data[name] = self.cleaned_data.get(name, False)
can_add = self.cleaned_data['can_add']
can_edit = self.cleaned_data['can_change']
        # check if access to children or descendants is granted
        if can_add and self.cleaned_data['grant_on'] == ACCESS_PAGE:
            # this is a misconfiguration - the user could add/move a page
            # under the current page but would then lose permission to
            # access it, so prevent this
raise forms.ValidationError(_("Add page permission requires also "
"access to children, or descendants, otherwise added page "
"can't be changed by its creator."))
if can_add and not can_edit:
raise forms.ValidationError(_('Add page permission also requires edit page permission.'))
        # TODO: finish this, but is it really required? It might be nice to
        # check whether the permissions assigned in the CMS are consistent and
        # display a message if not - consistent meaning: if a user has add
        # permission on a page but lacks the auth permission to add Page
        # objects, display a warning
return self.cleaned_data
def save(self, commit=True):
"""
Makes sure the boolean fields are set to False if they aren't
available in the form.
"""
instance = super(PagePermissionInlineAdminForm, self).save(commit=False)
for field in self._meta.model._meta.fields:
if isinstance(field, BooleanField) and field.name.startswith('can_'):
setattr(instance, field.name, self.cleaned_data.get(field.name, False))
if commit:
instance.save()
return instance
class Meta:
fields = '__all__'
model = PagePermission
class ViewRestrictionInlineAdminForm(PagePermissionInlineAdminForm):
can_view = forms.BooleanField(label=_('can_view'), widget=HiddenInput(), initial=True)
def clean_can_view(self):
self.cleaned_data["can_view"] = True
return True
class GlobalPagePermissionAdminForm(forms.ModelForm):
def clean(self):
super(GlobalPagePermissionAdminForm, self).clean()
if not self.cleaned_data['user'] and not self.cleaned_data['group']:
raise forms.ValidationError(_('Please select user or group first.'))
return self.cleaned_data
class Meta:
fields = '__all__'
model = GlobalPagePermission
class GenericCmsPermissionForm(forms.ModelForm):
"""Generic form for User & Grup permissions in cms
"""
can_add_page = forms.BooleanField(label=_('Add'), required=False, initial=True)
can_change_page = forms.BooleanField(label=_('Change'), required=False, initial=True)
can_delete_page = forms.BooleanField(label=_('Delete'), required=False)
can_recover_page = forms.BooleanField(label=_('Recover (any) pages'), required=False)
# pageuser is for pageuser & group - they are combined together,
# and read out from PageUser model
can_add_pageuser = forms.BooleanField(label=_('Add'), required=False)
can_change_pageuser = forms.BooleanField(label=_('Change'), required=False)
can_delete_pageuser = forms.BooleanField(label=_('Delete'), required=False)
can_add_pagepermission = forms.BooleanField(label=_('Add'), required=False)
can_change_pagepermission = forms.BooleanField(label=_('Change'), required=False)
can_delete_pagepermission = forms.BooleanField(label=_('Delete'), required=False)
def populate_initials(self, obj):
"""Read out permissions from permission system.
"""
initials = {}
permission_accessor = get_permission_accessor(obj)
for model in (Page, PageUser, PagePermission):
name = model.__name__.lower()
content_type = ContentType.objects.get_for_model(model)
permissions = permission_accessor.filter(content_type=content_type).values_list('codename', flat=True)
for key in ('add', 'change', 'delete'):
codename = get_permission_codename(key, model._meta)
initials['can_%s_%s' % (key, name)] = codename in permissions
return initials
class PageUserForm(UserCreationForm, GenericCmsPermissionForm):
notify_user = forms.BooleanField(label=_('Notify user'), required=False,
help_text=_(
'Send email notification to user about username or password change. Requires user email.'))
class Meta:
fields = '__all__'
model = PageUser
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
if instance:
initial = initial or {}
initial.update(self.populate_initials(instance))
super(PageUserForm, self).__init__(data, files, auto_id, prefix,
initial, error_class, label_suffix, empty_permitted, instance)
if instance:
# if it is a change form, keep those fields as not required
# password will be changed only if there is something entered inside
self.fields['password1'].required = False
self.fields['password1'].label = _('New password')
self.fields['password2'].required = False
self.fields['password2'].label = _('New password confirmation')
self._password_change = True
def clean_username(self):
if self.instance:
return self.cleaned_data['username']
return super(PageUserForm, self).clean_username()
# required if the User model's USERNAME_FIELD is the email field
def clean_email(self):
if self.instance:
return self.cleaned_data['email']
return super(PageUserForm, self).clean_email()
def clean_password2(self):
if self.instance and self.cleaned_data['password1'] == '' and self.cleaned_data['password2'] == '':
self._password_change = False
return u''
return super(PageUserForm, self).clean_password2()
def clean(self):
cleaned_data = super(PageUserForm, self).clean()
notify_user = self.cleaned_data['notify_user']
if notify_user and not self.cleaned_data.get('email', None):
raise forms.ValidationError(_("Email notification requires valid email address."))
if self.cleaned_data['can_add_page'] and not self.cleaned_data['can_change_page']:
raise forms.ValidationError(_("The permission to add new pages requires the permission to change pages!"))
if self.cleaned_data['can_add_pageuser'] and not self.cleaned_data['can_change_pageuser']:
raise forms.ValidationError(_("The permission to add new users requires the permission to change users!"))
if self.cleaned_data['can_add_pagepermission'] and not self.cleaned_data['can_change_pagepermission']:
raise forms.ValidationError(_("To add permissions you also need to edit them!"))
return cleaned_data
def save(self, commit=True):
"""Create user, assign him to staff users, and create permissions for
him if required. Also assigns creator to user.
"""
Super = self._password_change and PageUserForm or UserCreationForm
user = super(Super, self).save(commit=False)
user.is_staff = True
created = not bool(user.pk)
# assign creator to user
        if created:
            user.created_by = get_current_user()
if commit:
user.save()
save_permissions(self.cleaned_data, user)
if self.cleaned_data['notify_user']:
mail_page_user_change(user, created, self.cleaned_data['password1'])
return user
class PageUserGroupForm(GenericCmsPermissionForm):
class Meta:
model = PageUserGroup
fields = ('name', )
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
if instance:
initial = initial or {}
initial.update(self.populate_initials(instance))
super(PageUserGroupForm, self).__init__(data, files, auto_id, prefix,
initial, error_class, label_suffix, empty_permitted, instance)
def save(self, commit=True):
group = super(GenericCmsPermissionForm, self).save(commit=False)
created = not bool(group.pk)
# assign creator to user
if created:
group.created_by = get_current_user()
if commit:
group.save()
save_permissions(self.cleaned_data, group)
return group
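# --- Illustrative sketch (not part of django CMS): how the can_<action>_<model>
# booleans declared on GenericCmsPermissionForm line up with Django's default
# permission codenames, which is the comparison populate_initials() performs.
# The loop below is standalone and deliberately uses no Django imports; the
# module itself still needs configured Django settings to import.
if __name__ == "__main__":
    for model_name in ("page", "pageuser", "pagepermission"):
        for action in ("add", "change", "delete"):
            form_field = "can_%s_%s" % (action, model_name)
            codename = "%s_%s" % (action, model_name)  # Django's default codename scheme
            print("%s <-> %s" % (form_field, codename))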
|
bsd-3-clause
|
AnimeshSinha1309/WebsiteEdunet
|
WebsiteEdunet/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
|
317
|
6189
|
import base64
import io
import json
import zlib
from pip._vendor.requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, pickle
def _b64_encode_bytes(b):
return base64.b64encode(b).decode("ascii")
def _b64_encode_str(s):
return _b64_encode_bytes(s.encode("utf8"))
def _b64_decode_bytes(b):
return base64.b64decode(b.encode("ascii"))
def _b64_decode_str(s):
return _b64_decode_bytes(s).decode("utf8")
class Serializer(object):
def dumps(self, request, response, body=None):
response_headers = CaseInsensitiveDict(response.headers)
if body is None:
body = response.read(decode_content=False)
# NOTE: 99% sure this is dead code. I'm only leaving it
# here b/c I don't have a test yet to prove
# it. Basically, before using
# `cachecontrol.filewrapper.CallbackFileWrapper`,
# this made an effort to reset the file handle. The
# `CallbackFileWrapper` short circuits this code by
# setting the body as the content is consumed, the
# result being a `body` argument is *always* passed
# into cache_response, and in turn,
# `Serializer.dump`.
response._fp = io.BytesIO(body)
data = {
"response": {
"body": _b64_encode_bytes(body),
"headers": dict(
(_b64_encode_str(k), _b64_encode_str(v))
for k, v in response.headers.items()
),
"status": response.status,
"version": response.version,
"reason": _b64_encode_str(response.reason),
"strict": response.strict,
"decode_content": response.decode_content,
},
}
# Construct our vary headers
data["vary"] = {}
if "vary" in response_headers:
varied_headers = response_headers['vary'].split(',')
for header in varied_headers:
header = header.strip()
data["vary"][header] = request.headers.get(header, None)
# Encode our Vary headers to ensure they can be serialized as JSON
data["vary"] = dict(
(_b64_encode_str(k), _b64_encode_str(v) if v is not None else v)
for k, v in data["vary"].items()
)
return b",".join([
b"cc=2",
zlib.compress(
json.dumps(
data, separators=(",", ":"), sort_keys=True,
).encode("utf8"),
),
])
def loads(self, request, data):
# Short circuit if we've been given an empty set of data
if not data:
return
# Determine what version of the serializer the data was serialized
# with
try:
ver, data = data.split(b",", 1)
except ValueError:
ver = b"cc=0"
# Make sure that our "ver" is actually a version and isn't a false
# positive from a , being in the data stream.
if ver[:3] != b"cc=":
data = ver + data
ver = b"cc=0"
# Get the version number out of the cc=N
ver = ver.split(b"=", 1)[-1].decode("ascii")
# Dispatch to the actual load method for the given version
try:
return getattr(self, "_loads_v{0}".format(ver))(request, data)
except AttributeError:
# This is a version we don't have a loads function for, so we'll
# just treat it as a miss and return None
return
def prepare_response(self, request, cached):
"""Verify our vary headers match and construct a real urllib3
HTTPResponse object.
"""
# Special case the '*' Vary value as it means we cannot actually
# determine if the cached response is suitable for this request.
if "*" in cached.get("vary", {}):
return
# Ensure that the Vary headers for the cached response match our
# request
for header, value in cached.get("vary", {}).items():
if request.headers.get(header, None) != value:
return
body_raw = cached["response"].pop("body")
try:
body = io.BytesIO(body_raw)
except TypeError:
# This can happen if cachecontrol serialized to v1 format (pickle)
# using Python 2. A Python 2 str(byte string) will be unpickled as
# a Python 3 str (unicode string), which will cause the above to
# fail with:
#
# TypeError: 'str' does not support the buffer interface
body = io.BytesIO(body_raw.encode('utf8'))
return HTTPResponse(
body=body,
preload_content=False,
**cached["response"]
)
def _loads_v0(self, request, data):
# The original legacy cache data. This doesn't contain enough
# information to construct everything we need, so we'll treat this as
# a miss.
return
def _loads_v1(self, request, data):
try:
cached = pickle.loads(data)
except ValueError:
return
return self.prepare_response(request, cached)
def _loads_v2(self, request, data):
try:
cached = json.loads(zlib.decompress(data).decode("utf8"))
except ValueError:
return
# We need to decode the items that we've base64 encoded
cached["response"]["body"] = _b64_decode_bytes(
cached["response"]["body"]
)
cached["response"]["headers"] = dict(
(_b64_decode_str(k), _b64_decode_str(v))
for k, v in cached["response"]["headers"].items()
)
cached["response"]["reason"] = _b64_decode_str(
cached["response"]["reason"],
)
cached["vary"] = dict(
(_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
for k, v in cached["vary"].items()
)
return self.prepare_response(request, cached)
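# --- Illustrative sketch (not part of cachecontrol): the "cc=N," framing that
# loads() parses above, demonstrated on a toy payload. The JSON body is made up
# purely for illustration; run via `python -m` so the relative imports resolve.
if __name__ == "__main__":
    blob = b"cc=2," + zlib.compress(b'{"demo": true}')
    ver, payload = blob.split(b",", 1)  # first comma ends the version prefix
    assert ver == b"cc=2"
    print(json.loads(zlib.decompress(payload).decode("utf8")))  # -> {'demo': True}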
|
mit
|
tactcomplabs/gc64-hmcsim
|
test/sst/6.0.0/goblin_singlestream2.py
|
1
|
2128
|
import sst
# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")
# Define the simulation components
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
"verbose" : 0,
"generator" : "miranda.SingleStreamGenerator",
"generatorParams.verbose" : 0,
"generatorParams.startat" : 3,
"generatorParams.count" : 500000,
"generatorParams.max_address" : 512000,
"printStats" : 1,
})
# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)
# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
"access_latency_cycles" : "2",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MESI",
"associativity" : "4",
"cache_line_size" : "64",
"prefetcher" : "cassini.StridePrefetcher",
"debug" : "1",
"L1" : "1",
"cache_size" : "2KB"
})
# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
"coherence_protocol" : "MESI",
"backend.access_time" : "1000 ns",
"backend.mem_size" : "512",
"clock" : "1GHz",
"backend" : "memHierarchy.goblinHMCSim",
"backend.device_count" : "1",
"backend.link_count" : "4",
"backend.vault_count" : "32",
"backend.queue_depth" : "64",
"backend.bank_count" : "16",
"backend.dram_count" : "20",
"backend.capacity_per_device" : "4",
"backend.xbar_depth" : "128",
"backend.max_req_size" : "128"
})
# Define the simulation links
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect( (comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps") )
link_cpu_cache_link.setNoCut()
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
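# --- Illustrative addition (not in the original test): routing the accumulated
# statistics to a CSV file. The statOutputCSV name and the "filepath" option
# follow the SST 6.x statistics API; treat both as assumptions if your build
# differs.
# sst.setStatisticOutput("sst.statOutputCSV")
# sst.setStatisticOutputOptions({"filepath" : "goblin_singlestream2_stats.csv"})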
|
bsd-2-clause
|
cordoval/myhdl-python
|
example/cookbook/stopwatch/TimeCount.py
|
6
|
1628
|
from myhdl import *
def TimeCount(tens, ones, tenths, startstop, reset, clock):
""" 3 digit time counter in seconds and tenths of a second.
tens: most significant digit of the seconds
ones: least significant digit of the seconds
tenths: tenths of a second
startstop: input that starts or stops the counter on posedge
reset: reset input
clock: 10Hz clock input
"""
@instance
def logic():
seen = False
counting = False
while True:
yield clock.posedge, reset.posedge
if reset:
tens.next = 0
ones.next = 0
tenths.next = 0
seen = False
counting = False
else:
if startstop and not seen:
seen = True
counting = not counting
elif not startstop:
seen = False
if counting:
if tenths == 9:
tenths.next = 0
if ones == 9:
ones.next = 0
if tens == 5:
tens.next = 0
else:
tens.next = tens + 1
else:
ones.next = ones + 1
else:
tenths.next = tenths + 1
return logic
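# --- Illustrative test bench (not part of the original example): drives the
# counter with a fast simulated clock and a start pulse, then prints the
# digits. It relies only on names pulled in by the star import above and uses
# the classic pre-1.0 MyHDL Simulation API; the timing values are arbitrary.
if __name__ == '__main__':
    tens, ones, tenths = [Signal(intbv(0)[4:]) for _ in range(3)]
    startstop, reset, clock = [Signal(bool(0)) for _ in range(3)]
    dut = TimeCount(tens, ones, tenths, startstop, reset, clock)

    @instance
    def clkgen():
        while True:
            yield delay(5)          # 10 time-unit clock period
            clock.next = not clock

    @instance
    def stimulus():
        startstop.next = 1          # posedge starts the counter
        yield delay(200)            # let roughly 20 clock edges pass
        print "%d%d.%d" % (int(tens), int(ones), int(tenths))
        raise StopSimulation

    Simulation(dut, clkgen, stimulus).run()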
|
lgpl-2.1
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/neighbors/graph.py
|
208
|
7031
|
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
    include_self : bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
    radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
    'euclidean' ('minkowski' metric with the p param equal to 2.)
    include_self : bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
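# --- Illustrative sketch (not part of scikit-learn): both helpers above also
# accept a pre-fitted NearestNeighbors estimator in place of raw data, which
# avoids rebuilding the index when several graphs are needed. Run via
# `python -m sklearn.neighbors.graph` so the relative imports resolve; assumes
# the contemporaneous scikit-learn this module ships with.
if __name__ == "__main__":
    X = [[0], [3], [1]]
    nn = NearestNeighbors(n_neighbors=2).fit(X)
    print(kneighbors_graph(nn, n_neighbors=2, include_self=True).toarray())
    print(radius_neighbors_graph(nn, radius=1.5, include_self=True).toarray())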
|
gpl-2.0
|
cntnboys/cmput410-project
|
venv/lib/python2.7/site-packages/setuptools/tests/test_develop.py
|
148
|
2816
|
"""develop tests
"""
import os
import shutil
import site
import sys
import tempfile
from setuptools.command.develop import develop
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo',
packages=['foo'],
use_2to3=True,
)
"""
INIT_PY = """print "foo"
"""
class TestDevelopTest:
def setup_method(self, method):
if hasattr(sys, 'real_prefix'):
return
# Directory structure
self.dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.dir, 'foo'))
# setup.py
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
# foo/__init__.py
init = os.path.join(self.dir, 'foo', '__init__.py')
f = open(init, 'w')
f.write(INIT_PY)
f.close()
os.chdir(self.dir)
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
def teardown_method(self, method):
if hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix):
return
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_develop(self):
if hasattr(sys, 'real_prefix'):
return
dist = Distribution(
dict(name='foo',
packages=['foo'],
use_2to3=True,
version='0.0',
))
dist.script_name = 'setup.py'
cmd = develop(dist)
cmd.user = 1
cmd.ensure_finalized()
cmd.install_dir = site.USER_SITE
cmd.user = 1
old_stdout = sys.stdout
#sys.stdout = StringIO()
try:
cmd.run()
finally:
sys.stdout = old_stdout
# let's see if we got our egg link at the right place
content = os.listdir(site.USER_SITE)
content.sort()
assert content == ['easy-install.pth', 'foo.egg-link']
# Check that we are using the right code.
egg_link_file = open(os.path.join(site.USER_SITE, 'foo.egg-link'), 'rt')
try:
path = egg_link_file.read().split()[0].strip()
finally:
egg_link_file.close()
init_file = open(os.path.join(path, 'foo', '__init__.py'), 'rt')
try:
init = init_file.read().strip()
finally:
init_file.close()
if sys.version < "3":
assert init == 'print "foo"'
else:
assert init == 'print("foo")'
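# --- Illustrative note (not part of the test suite): the command-line
# equivalent of what test_develop drives programmatically is roughly
#
#     python setup.py develop --user
#
# which drops a foo.egg-link file (containing the project path) plus an
# easy-install.pth entry into the per-user site-packages, exactly the two
# entries the assertions above look for.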
|
apache-2.0
|
sudarkoff/ansible
|
test/units/playbook/test_taggable.py
|
293
|
4452
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.playbook.taggable import Taggable
from units.mock.loader import DictDataLoader
class TaggableTestObj(Taggable):
def __init__(self):
self._loader = DictDataLoader({})
self.tags = []
class TestTaggable(unittest.TestCase):
def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags):
taggable_obj = TaggableTestObj()
taggable_obj.tags = tags
evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {})
self.assertEqual(test_value, evaluate)
def test_evaluate_tags_tag_in_only_tags(self):
self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], [])
def test_evaluate_tags_tag_in_skip_tags(self):
self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1'])
def test_evaluate_tags_special_always_in_object_tags(self):
self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], [])
def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag'])
def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always'])
def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self):
self.assert_evaluate_equal(True, ['tag'], ['tagged'], [])
def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self):
self.assert_evaluate_equal(False, [], ['tagged'], [])
def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self):
self.assert_evaluate_equal(False, ['tag'], [], ['tagged'])
def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self):
self.assert_evaluate_equal(True, [], [], ['tagged'])
def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self):
self.assert_evaluate_equal(False, ['tag'], ['untagged'], [])
def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self):
self.assert_evaluate_equal(True, [], ['untagged'], [])
def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self):
self.assert_evaluate_equal(True, ['tag'], [], ['untagged'])
def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self):
self.assert_evaluate_equal(False, [], [], ['untagged'])
def test_evaluate_tags_special_all_in_only_tags(self):
self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged'])
def test_evaluate_tags_special_all_in_skip_tags(self):
self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all'])
def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self):
self.assert_evaluate_equal(False, ['tag'], ['all'], ['all'])
def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self):
self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all'])
def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always'])
def test_evaluate_tags_accepts_lists(self):
self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], [])
def test_evaluate_tags_accepts_strings(self):
self.assert_evaluate_equal(True, 'tag1,tag2', ['tag2'], [])
def test_evaluate_tags_with_repeated_tags(self):
self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag'])
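# --- Illustrative sketch (not Ansible's implementation): a rough model of the
# semantics the assertions above encode, written against plain lists/sets. The
# helper name is invented here for illustration only.
def rough_evaluate_tags(tags, only_tags, skip_tags):
    if isinstance(tags, str):
        tags = tags.split(',')       # tests show comma-separated strings work
    tags = set(tags)
    if skip_tags:
        # 'all' skips everything except objects tagged 'always' (unless
        # 'always' itself is skipped); explicit tags always win a skip
        if 'all' in skip_tags and ('always' not in tags or 'always' in skip_tags):
            return False
        if tags & set(skip_tags):
            return False
        if 'tagged' in skip_tags and tags:
            return False
        if 'untagged' in skip_tags and not tags:
            return False
    if only_tags:
        if 'always' in tags or 'all' in only_tags:
            return True
        if tags & set(only_tags):
            return True
        if 'tagged' in only_tags and tags:
            return True
        if 'untagged' in only_tags and not tags:
            return True
        return False
    return True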
|
gpl-3.0
|
ManjiriBirajdar/coala
|
tests/misc/ShellTest.py
|
11
|
3511
|
from contextlib import ExitStack
import os
import sys
from tempfile import NamedTemporaryFile
import unittest
from coalib.misc.Shell import run_interactive_shell_command, run_shell_command
class RunShellCommandTest(unittest.TestCase):
@staticmethod
def construct_testscript_command(scriptname):
return (sys.executable,
os.path.join(os.path.dirname(os.path.realpath(__file__)),
"run_shell_command_testfiles",
scriptname))
def test_run_interactive_shell_command(self):
command = RunShellCommandTest.construct_testscript_command(
"test_interactive_program.py")
with run_interactive_shell_command(command) as p:
self.assertEqual(p.stdout.readline(), "test_program X\n")
self.assertEqual(p.stdout.readline(), "Type in a number:\n")
p.stdin.write("33\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "33\n")
self.assertEqual(p.stdout.readline(), "Exiting program.\n")
self.assertEqual(p.stdout.read(), "")
self.assertEqual(p.stderr.read(), "")
def test_run_interactive_shell_command_custom_streams(self):
command = RunShellCommandTest.construct_testscript_command(
"test_interactive_program.py")
with ExitStack() as stack:
streams = {s: stack.enter_context(NamedTemporaryFile(mode="w+"))
for s in ["stdout", "stderr", "stdin"]}
with run_interactive_shell_command(command, **streams) as p:
streams["stdin"].write("712\n")
streams["stdin"].flush()
streams["stdin"].seek(0)
self.assertFalse(streams["stdout"].closed)
self.assertFalse(streams["stderr"].closed)
self.assertFalse(streams["stdin"].closed)
streams["stdout"].seek(0)
self.assertEqual(streams["stdout"].read(),
"test_program X\nType in a number:\n712\n"
"Exiting program.\n")
streams["stderr"].seek(0)
self.assertEqual(streams["stderr"].read(), "")
def test_run_interactive_shell_command_kwargs_delegation(self):
with self.assertRaises(TypeError):
with run_interactive_shell_command("some_command",
weird_parameter=30):
pass
def test_run_shell_command_without_stdin(self):
command = RunShellCommandTest.construct_testscript_command(
"test_program.py")
stdout, stderr = run_shell_command(command)
expected = ("test_program Z\n"
"non-interactive mode.\n"
"Exiting...\n")
self.assertEqual(stdout, expected)
self.assertEqual(stderr, "")
def test_run_shell_command_with_stdin(self):
command = RunShellCommandTest.construct_testscript_command(
"test_input_program.py")
stdout, stderr = run_shell_command(command, "1 4 10 22")
self.assertEqual(stdout, "37\n")
self.assertEqual(stderr, "")
stdout, stderr = run_shell_command(command, "1 p 5")
self.assertEqual(stdout, "")
self.assertEqual(stderr, "INVALID INPUT\n")
def test_run_shell_command_kwargs_delegation(self):
with self.assertRaises(TypeError):
run_shell_command("super-cool-command", weird_parameter2="abc")
|
agpl-3.0
|
ananthonline/grpc
|
tools/distrib/python/submit.py
|
44
|
3760
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import shutil
import subprocess
parser = argparse.ArgumentParser(
description='Submit the package to a PyPI repository.')
parser.add_argument(
'--repository', '-r', metavar='r', type=str, default='pypi',
help='The repository to push the package to. '
'Ensure the value appears in your .pypirc file. '
'Defaults to "pypi".'
)
parser.add_argument(
'--identity', '-i', metavar='i', type=str,
help='GPG identity to sign the files with.'
)
parser.add_argument(
'--username', '-u', metavar='u', type=str,
help='Username to authenticate with the repository. Not needed if you have '
'configured your .pypirc to include your username.'
)
parser.add_argument(
'--password', '-p', metavar='p', type=str,
help='Password to authenticate with the repository. Not needed if you have '
'configured your .pypirc to include your password.'
)
parser.add_argument(
'--bdist', '-b', action='store_true',
help='Generate a binary distribution (wheel) for the current OS.'
)
parser.add_argument(
'--dist-args', type=str,
help='Additional arguments to pass to the *dist setup.py command.'
)
args = parser.parse_args()
# Move to the root directory of Python GRPC.
pkgdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../')
# Remove previous distributions; they somehow confuse twine.
try:
shutil.rmtree(os.path.join(pkgdir, 'dist/'))
except:
pass
# Build the Cython C files
build_env = os.environ.copy()
build_env['GRPC_PYTHON_BUILD_WITH_CYTHON'] = "1"
cmd = ['python', 'setup.py', 'build_ext', '--inplace']
subprocess.call(cmd, cwd=pkgdir, env=build_env)
# Make the push.
if args.bdist:
cmd = ['python', 'setup.py', 'bdist_wheel']
else:
cmd = ['python', 'setup.py', 'sdist']
if args.dist_args:
cmd += args.dist_args.split()
subprocess.call(cmd, cwd=pkgdir)
cmd = ['twine', 'upload', '-r', args.repository]
if args.identity is not None:
cmd.extend(['-i', args.identity])
if args.username is not None:
cmd.extend(['-u', args.username])
if args.password is not None:
cmd.extend(['-p', args.password])
cmd.append('dist/*')
subprocess.call(cmd, cwd=pkgdir)
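# --- Illustrative note (not part of the script): the --repository flag expects
# a matching section in your ~/.pypirc; all values below are placeholders.
#
#     [distutils]
#     index-servers = pypi
#
#     [pypi]
#     username = <your-username>
#     password = <your-password>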
|
bsd-3-clause
|
Titulacion-Sistemas/PythonTitulacion-EV
|
Lib/site-packages/pywin32-219-py2.7-win32.egg/test/test_win32wnet.py
|
17
|
5947
|
import unittest
import win32wnet
import win32api
import netbios
from pywin32_testutil import str2bytes
RESOURCE_CONNECTED = 0x00000001
RESOURCE_GLOBALNET = 0x00000002
RESOURCE_REMEMBERED = 0x00000003
RESOURCE_RECENT = 0x00000004
RESOURCE_CONTEXT = 0x00000005
RESOURCETYPE_ANY = 0x00000000
RESOURCETYPE_DISK = 0x00000001
RESOURCETYPE_PRINT = 0x00000002
RESOURCETYPE_RESERVED = 0x00000008
RESOURCETYPE_UNKNOWN = 0xFFFFFFFF
RESOURCEUSAGE_CONNECTABLE = 0x00000001
RESOURCEUSAGE_CONTAINER = 0x00000002
RESOURCEDISPLAYTYPE_GENERIC = 0x00000000
RESOURCEDISPLAYTYPE_DOMAIN = 0x00000001
RESOURCEDISPLAYTYPE_SERVER = 0x00000002
RESOURCEDISPLAYTYPE_SHARE = 0x00000003
NETRESOURCE_attributes = [
("dwScope", int),
("dwType", int),
("dwDisplayType", int),
("dwUsage", int),
("lpLocalName", str),
("lpRemoteName", str),
("lpComment", str),
("lpProvider", str),
]
NCB_attributes = [
("Command", int),
("Retcode", int),
("Lsn", int),
("Num", int),
# ("Bufflen", int), - read-only
("Callname", str),
("Name", str),
("Rto", int),
("Sto", int),
("Lana_num", int),
("Cmd_cplt", int),
("Event", int),
("Post", int),
]
class TestCase(unittest.TestCase):
def testGetUser(self):
self.assertEquals(win32api.GetUserName(), win32wnet.WNetGetUser())
def _checkItemAttributes(self, item, attrs):
for attr, typ in attrs:
val = getattr(item, attr)
if typ is int:
self.failUnless(type(val) in (int,),
"Attr %r has value %r" % (attr, val))
new_val = val + 1
elif typ is str:
if val is not None:
# on py2k, must be string or unicode. py3k must be string or bytes.
self.failUnless(type(val) in (str, unicode),
"Attr %r has value %r" % (attr, val))
new_val = val + " new value"
else:
new_val = "new value"
else:
self.fail("Don't know what %s is" % (typ,))
# set the attribute just to make sure we can.
setattr(item, attr, new_val)
def testNETRESOURCE(self):
nr = win32wnet.NETRESOURCE()
self._checkItemAttributes(nr, NETRESOURCE_attributes)
def testWNetEnumResource(self):
handle = win32wnet.WNetOpenEnum(RESOURCE_GLOBALNET, RESOURCETYPE_ANY,
0, None)
try:
while 1:
items = win32wnet.WNetEnumResource(handle, 0)
if len(items)==0:
break
for item in items:
self._checkItemAttributes(item, NETRESOURCE_attributes)
finally:
handle.Close()
def testNCB(self):
ncb = win32wnet.NCB()
self._checkItemAttributes(ncb, NCB_attributes)
def testNetbios(self):
# taken from the demo code in netbios.py
ncb = win32wnet.NCB()
ncb.Command = netbios.NCBENUM
la_enum = netbios.LANA_ENUM()
ncb.Buffer = la_enum
rc = win32wnet.Netbios(ncb)
self.failUnlessEqual(rc, 0)
for i in range(la_enum.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = netbios.byte_to_int(la_enum.lana[i])
            rc = win32wnet.Netbios(ncb)
self.failUnlessEqual(rc, 0)
ncb.Reset()
ncb.Command = netbios.NCBASTAT
            ncb.Lana_num = netbios.byte_to_int(la_enum.lana[i])
ncb.Callname = str2bytes("* ") # ensure bytes on py2x and 3k
adapter = netbios.ADAPTER_STATUS()
ncb.Buffer = adapter
            win32wnet.Netbios(ncb)
# expect 6 bytes in the mac address.
            self.failUnlessEqual(len(adapter.adapter_address), 6)
def iterConnectableShares(self):
nr = win32wnet.NETRESOURCE()
nr.dwScope = RESOURCE_GLOBALNET
nr.dwUsage = RESOURCEUSAGE_CONTAINER
nr.lpRemoteName = "\\\\" + win32api.GetComputerName()
handle = win32wnet.WNetOpenEnum(RESOURCE_GLOBALNET, RESOURCETYPE_ANY,
0, nr)
while 1:
items = win32wnet.WNetEnumResource(handle, 0)
if len(items)==0:
break
for item in items:
if item.dwDisplayType == RESOURCEDISPLAYTYPE_SHARE:
yield item
def findUnusedDriveLetter(self):
existing = [x[0].lower() for x in win32api.GetLogicalDriveStrings().split('\0') if x]
handle = win32wnet.WNetOpenEnum(RESOURCE_REMEMBERED,RESOURCETYPE_DISK,0,None)
try:
while 1:
items = win32wnet.WNetEnumResource(handle, 0)
if len(items)==0:
break
xtra = [i.lpLocalName[0].lower() for i in items if i.lpLocalName]
existing.extend(xtra)
finally:
handle.Close()
for maybe in 'defghijklmnopqrstuvwxyz':
if maybe not in existing:
return maybe
self.fail("All drive mappings are taken?")
def testAddConnection(self):
localName = self.findUnusedDriveLetter() + ':'
for share in self.iterConnectableShares():
share.lpLocalName = localName
win32wnet.WNetAddConnection2(share)
win32wnet.WNetCancelConnection2(localName, 0, 0)
break
def testAddConnectionOld(self):
localName = self.findUnusedDriveLetter() + ':'
for share in self.iterConnectableShares():
win32wnet.WNetAddConnection2(share.dwType, localName, share.lpRemoteName)
win32wnet.WNetCancelConnection2(localName, 0, 0)
break
if __name__ == '__main__':
unittest.main()
|
mit
|
pombredanne/django-tenant-schemas
|
examples/tenant_tutorial/customers/views.py
|
9
|
1718
|
from django.contrib.auth.models import User
from django.db.utils import DatabaseError
from django.views.generic import FormView
from customers.forms import GenerateUsersForm
from customers.models import Client
from random import choice
class TenantView(FormView):
form_class = GenerateUsersForm
template_name = "index_tenant.html"
success_url = "/"
def get_context_data(self, **kwargs):
context = super(TenantView, self).get_context_data(**kwargs)
context['tenants_list'] = Client.objects.all()
context['users'] = User.objects.all()
return context
def form_valid(self, form):
User.objects.all().delete() # clean current users
# generate five random users
USERS_TO_GENERATE = 5
first_names = ["Aiden", "Jackson", "Ethan", "Liam", "Mason", "Noah",
"Lucas", "Jacob", "Jayden", "Jack", "Sophia", "Emma",
"Olivia", "Isabella", "Ava", "Lily", "Zoe", "Chloe",
"Mia", "Madison"]
last_names = ["Smith", "Brown", "Lee ", "Wilson", "Martin", "Patel",
"Taylor", "Wong", "Campbell", "Williams"]
while User.objects.count() != USERS_TO_GENERATE:
first_name = choice(first_names)
last_name = choice(last_names)
try:
user = User(username=(first_name + last_name).lower(),
email="%s@%s.com" % (first_name, last_name),
first_name=first_name,
last_name=last_name)
user.save()
except DatabaseError:
pass
return super(TenantView, self).form_valid(form)
|
mit
|
thaskell1/volatility
|
volatility/plugins/linux/enumerate_files.py
|
12
|
2031
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.find_file as linux_find_file
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class linux_enumerate_files(linux_common.AbstractLinuxCommand):
"""Lists files referenced by the filesystem cache"""
def calculate(self):
linux_common.set_plugin_members(self)
        for (_, _, file_path, file_dentry) in linux_find_file.linux_find_file(self._config).walk_sbs():
inode = file_dentry.d_inode
yield inode, inode.i_ino, file_path
def unified_output(self, data):
return TreeGrid([("Inode Address", Address), ("Inode Number", int), ("Path", str)],
self.generator(data))
def generator(self, data):
for inode, inum, path in data:
yield (0, [Address(inode.v()), int(inum), str(path)])
def render_text(self, outfd, data):
self.table_header(outfd, [("Inode Address", "[addr]"), ("Inode Number", "25"), ("Path", "")])
for inode, inum, path in data:
self.table_row(outfd, inode, inum, path)
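# --- Illustrative invocation (not part of the plugin; the image path and
# profile name are placeholders): with a matching Linux profile installed, the
# plugin is typically run as
#
#     python vol.py -f memory.lime --profile=<LinuxProfile> linux_enumerate_files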
|
gpl-2.0
|
jazkarta/edx-platform-for-isc
|
cms/djangoapps/contentstore/features/grading.py
|
12
|
6983
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from common import *
from terrain.steps import reload_the_page
from selenium.common.exceptions import InvalidElementStateException
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from contentstore.utils import reverse_course_url
from nose.tools import assert_in, assert_not_in, assert_equal, assert_not_equal # pylint: disable=no-name-in-module
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
world.click_course_settings()
link_css = 'li.nav-course-settings-grading a'
world.css_click(link_css)
@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
grade_css = '.new-grade-button'
for i in range(int(many)):
world.css_click(grade_css)
@step(u'I delete a grade')
def delete_grade(step):
#grade_css = 'li.grade-specific-bar > a.remove-button'
#range_css = '.grade-specific-bar'
#world.css_find(range_css)[1].mouseover()
#world.css_click(grade_css)
world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')
@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
grade_slider_css = '.grade-specific-bar'
all_grades = world.css_find(grade_slider_css)
assert_equal(len(all_grades), int(how_many))
@step(u'I move a grading section')
def move_grade_slider(step):
moveable_css = '.ui-resizable-e'
f = world.css_find(moveable_css).first
f.action_chains.drag_and_drop_by_offset(f._element, 100, 0).perform()
@step(u'I see that the grade range has changed')
def confirm_change(step):
range_css = '.range'
all_ranges = world.css_find(range_css)
for i in range(len(all_ranges)):
assert_not_equal(world.css_html(range_css, index=i), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
name_id = '#course-grading-assignment-name'
index = get_type_index(old_name)
f = world.css_find(name_id)[index]
assert_not_equal(index, -1)
for __ in xrange(len(old_name)):
f._element.send_keys(Keys.END, Keys.BACK_SPACE)
f._element.send_keys(new_name)
@step(u'I go back to the main course page')
def main_course_page(step):
main_page_link = reverse_course_url('course_handler', world.scenario_dict['COURSE'].id)
world.visit(main_page_link)
assert_in('Course Outline', world.css_text('h1.page-header'))
@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
# TODO: rewrite this once grading has been added back to the course outline
pass
# assignment_menu_css = 'ul.menu > li > a'
# # First assert that it is there, make take a bit to redraw
# assert_true(
# world.css_find(assignment_menu_css),
# msg="Could not find assignment menu"
# )
#
# assignment_menu = world.css_find(assignment_menu_css)
# allnames = [item.html for item in assignment_menu]
# if do_not:
# assert_not_in(name, allnames)
# else:
# assert_in(name, allnames)
@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
delete_css = '.remove-grading-data'
world.css_click(delete_css, index=get_type_index(to_delete))
@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
add_button_css = '.add-grading-data'
world.css_click(add_button_css)
name_id = '#course-grading-assignment-name'
new_assignment = world.css_find(name_id)[-1]
new_assignment._element.send_keys(new_name)
@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
weight_field = world.css_find(weight_id)[-1]
old_weight = world.css_value(weight_id, -1)
for count in range(len(old_weight)):
weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
weight_field._element.send_keys(weight)
@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
assert_equal(world.css_value(weight_id, -1), weight)
@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
reload_the_page(step)
name_id = '#course-grading-assignment-name'
assert_equal(world.css_value(name_id), 'Homework')
@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
assignment_css = '#course-grading-assignment-name'
assignments = world.css_find(assignment_css)
types = [ele['value'] for ele in assignments]
assert_in(name, types)
@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
grade.value = range_name
@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
assert_equal(grade.value, range_name)
@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
range_css = 'span.letter-grade'
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
# try to change the grade range -- this should throw an exception
try:
ranges.last.value = 'Failure'
except (InvalidElementStateException):
pass # We should get this exception on failing to edit the element
# check to be sure that nothing has changed
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
ele = world.css_find(grace_period_css).first
# Sometimes it takes a moment for the JavaScript
# to populate the field. If we don't wait for
# this to happen, then we can end up with
# an invalid value (e.g. "00:0048:00")
# which prevents us from saving.
assert_true(world.css_has_value(grace_period_css, "00:00"))
# Set the new grace period
ele.value = grace_period
@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
# The default value is 00:00
# so we need to wait for it to change
world.wait_for(
lambda _: world.css_has_value(grace_period_css, grace_period)
)
def get_type_index(name):
name_id = '#course-grading-assignment-name'
all_types = world.css_find(name_id)
for index in range(len(all_types)):
if world.css_value(name_id, index=index) == name:
return index
return -1
|
agpl-3.0
|
Lyleo/OmniMarkupPreviewer
|
OmniMarkupLib/Renderers/libs/python3/docutils/parsers/rst/languages/en.py
|
4
|
3277
|
# $Id$
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
English-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention': 'attention',
'caution': 'caution',
'code': 'code',
'code-block': 'code',
'sourcecode': 'code',
'danger': 'danger',
'error': 'error',
'hint': 'hint',
'important': 'important',
'note': 'note',
'tip': 'tip',
'warning': 'warning',
'admonition': 'admonition',
'sidebar': 'sidebar',
'topic': 'topic',
'line-block': 'line-block',
'parsed-literal': 'parsed-literal',
'rubric': 'rubric',
'epigraph': 'epigraph',
'highlights': 'highlights',
'pull-quote': 'pull-quote',
'compound': 'compound',
'container': 'container',
#'questions': 'questions',
'table': 'table',
'csv-table': 'csv-table',
'list-table': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
'meta': 'meta',
'math': 'math',
#'imagemap': 'imagemap',
'image': 'image',
'figure': 'figure',
'include': 'include',
'raw': 'raw',
'replace': 'replace',
'unicode': 'unicode',
'date': 'date',
'class': 'class',
'role': 'role',
'default-role': 'default-role',
'title': 'title',
'contents': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
'header': 'header',
'footer': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
'target-notes': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'abbreviation': 'abbreviation',
'ab': 'abbreviation',
'acronym': 'acronym',
'ac': 'acronym',
'code': 'code',
'index': 'index',
'i': 'index',
'subscript': 'subscript',
'sub': 'subscript',
'superscript': 'superscript',
'sup': 'superscript',
'title-reference': 'title-reference',
'title': 'title-reference',
't': 'title-reference',
'pep-reference': 'pep-reference',
'pep': 'pep-reference',
'rfc-reference': 'rfc-reference',
'rfc': 'rfc-reference',
'emphasis': 'emphasis',
'strong': 'strong',
'literal': 'literal',
'math': 'math',
'named-reference': 'named-reference',
'anonymous-reference': 'anonymous-reference',
'footnote-reference': 'footnote-reference',
'citation-reference': 'citation-reference',
'substitution-reference': 'substitution-reference',
'target': 'target',
'uri-reference': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
'raw': 'raw',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
|
mit
|
be-cloud-be/horizon-addons
|
horizon/school_course_description/__init__.py
|
1
|
1024
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2015 be-cloud.be
# Jerome Sonnet <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import models
import controllers
#import wizard
|
agpl-3.0
|
dmerejkowsky/twittback
|
twittback/test/test_edit.py
|
1
|
1694
|
import types
import twittback.edit
import pytest
def setup_edit_test(tweet_factory, repository, mocker, *, nvim_returncode):
tweet_1 = tweet_factory.make_tweet(42, "First tweet!", date="2017-07-07")
tweet_2 = tweet_factory.make_tweet(57, "Second tweet", date="2017-08-02")
repository.add_tweets([tweet_1, tweet_2])
spy = types.SimpleNamespace()
spy.cmd = None
def fake_run(cmd):
stub_process = mocker.Mock()
spy.cmd = cmd
stub_process.returncode = nvim_returncode
path = cmd[1]
with open(path, "w") as stream:
stream.write("changed")
return stub_process
mocker.patch("subprocess.run", fake_run)
return spy
def test_edit_happy(tweet_factory, repository, mocker):
spy = setup_edit_test(tweet_factory, repository, mocker, nvim_returncode=0)
twittback.edit.edit(repository, 42)
assert spy.cmd[0] == "nvim"
assert repository.tweet_by_id(42).text == "changed"
def test_edit_editor_nonzero_exit(tweet_factory, repository, mocker):
spy = setup_edit_test(tweet_factory, repository, mocker, nvim_returncode=1)
with pytest.raises(SystemExit) as e:
twittback.edit.edit(repository, 42)
error_message = e.value.args[0]
assert "Edit failed" in error_message
assert repository.tweet_by_id(42).text == "First tweet!"
def test_edit_no_such_id(tweet_factory, repository, mocker):
spy = setup_edit_test(tweet_factory, repository, mocker, nvim_returncode=1)
with pytest.raises(SystemExit) as e:
twittback.edit.edit(repository, 1001)
error_message = e.value.args[0]
assert "No such id" in error_message
assert "1001" in error_message
|
mit
|
rhcarvalho/kombu
|
kombu/transport/django/__init__.py
|
1
|
1865
|
"""Kombu transport using the Django database as a message store."""
from __future__ import absolute_import
from django.conf import settings
from django.core import exceptions as errors
from kombu.five import Empty
from kombu.transport import virtual
from kombu.utils.encoding import bytes_to_str
from kombu.utils.json import loads, dumps
from .models import Queue
VERSION = (1, 0, 0)
__version__ = '.'.join(map(str, VERSION))
POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',
getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))
class Channel(virtual.Channel):
def _new_queue(self, queue, **kwargs):
Queue.objects.get_or_create(name=queue)
def _put(self, queue, message, **kwargs):
Queue.objects.publish(queue, dumps(message))
def basic_consume(self, queue, *args, **kwargs):
qinfo = self.state.bindings[queue]
exchange = qinfo[0]
if self.typeof(exchange).type == 'fanout':
return
super(Channel, self).basic_consume(queue, *args, **kwargs)
def _get(self, queue):
m = Queue.objects.fetch(queue)
if m:
return loads(bytes_to_str(m))
raise Empty()
def _size(self, queue):
return Queue.objects.size(queue)
def _purge(self, queue):
return Queue.objects.purge(queue)
def refresh_connection(self):
from django import db
db.close_connection()
class Transport(virtual.Transport):
Channel = Channel
default_port = 0
polling_interval = POLLING_INTERVAL
channel_errors = (
virtual.Transport.channel_errors + (
errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)
)
driver_type = 'sql'
driver_name = 'django'
def driver_version(self):
import django
return '.'.join(map(str, django.VERSION))
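# --- Illustrative configuration (an assumption based on this transport's
# conventions, not quoted from docs): the transport is selected with a
# "django://" broker URL, and the app must be installed so the Queue model's
# tables exist, roughly:
#
#     # settings.py
#     INSTALLED_APPS = [..., 'kombu.transport.django']
#     BROKER_URL = 'django://'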
|
bsd-3-clause
|
bitcoinec/bitcoinec
|
contrib/devtools/optimize-pngs.py
|
111
|
3391
|
#!/usr/bin/env python
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
'''Return hash of raw file contents'''
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest()
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
absFolder=os.path.join(basePath, folder)
for file in os.listdir(absFolder):
extension = os.path.splitext(file)[1]
if extension.lower() == '.png':
print("optimizing "+file+"..."),
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)}
fileMetaMap['contentHashPre'] = content_hash(file_path)
pngCrushOutput = ""
try:
pngCrushOutput = subprocess.check_output(
[pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stderr=subprocess.STDOUT).rstrip('\n')
except:
print "pngcrush is not installed, aborting..."
sys.exit(0)
#verify
if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
sys.exit(1)
fileMetaMap['sha256New'] = file_hash(file_path)
fileMetaMap['contentHashPost'] = content_hash(file_path)
if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
print "Image contents of PNG file "+file+" before and after crushing don't match"
sys.exit(1)
fileMetaMap['psize'] = os.path.getsize(file_path)
outputArray.append(fileMetaMap)
print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
oldHash = fileDict['sha256Old']
newHash = fileDict['sha256New']
totalSaveBytes += fileDict['osize'] - fileDict['psize']
noHashChange = noHashChange and (oldHash == newHash)
print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
|
mit
|
thanhacun/odoo
|
addons/account/ir_sequence.py
|
336
|
2454
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import fields, osv
class ir_sequence_fiscalyear(osv.osv):
_name = 'account.sequence.fiscalyear'
_rec_name = "sequence_main_id"
_columns = {
"sequence_id": fields.many2one("ir.sequence", 'Sequence', required=True,
ondelete='cascade'),
"sequence_main_id": fields.many2one("ir.sequence", 'Main Sequence',
required=True, ondelete='cascade'),
"fiscalyear_id": fields.many2one('account.fiscalyear', 'Fiscal Year',
required=True, ondelete='cascade')
}
_sql_constraints = [
('main_id', 'CHECK (sequence_main_id != sequence_id)',
'Main Sequence must be different from current !'),
]
class ir_sequence(osv.osv):
_inherit = 'ir.sequence'
_columns = {
'fiscal_ids': fields.one2many('account.sequence.fiscalyear',
'sequence_main_id', 'Sequences', copy=True)
}
@api.cr_uid_ids_context
def _next(self, cr, uid, seq_ids, context=None):
if context is None:
context = {}
for seq in self.browse(cr, uid, seq_ids, context):
for line in seq.fiscal_ids:
if line.fiscalyear_id.id == context.get('fiscalyear_id'):
return super(ir_sequence, self)._next(cr, uid, [line.sequence_id.id], context)
return super(ir_sequence, self)._next(cr, uid, seq_ids, context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
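# Usage sketch (illustrative, not from this module): callers select the
# per-fiscal-year sub-sequence by passing the fiscal year through the context,
# which _next() above inspects:
#
#     ctx = dict(context or {}, fiscalyear_id=fiscalyear_id)
#     number = self.pool.get('ir.sequence').next_by_id(cr, uid, sequence_id, ctx)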
|
agpl-3.0
|
zammitjames/NLPS
|
example.py
|
1
|
2411
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nlps import nlps
# James Zammit
title = "Steve Jobs Biography"
text = "Steven Paul Jobs was born on February 24, 1955, in San Francisco, California, to Joanne Schieble (later Joanne Simpson) and Abdulfattah 'John' Jandali, two University of Wisconsin graduate students who gave their unnamed son up for adoption. His father, Abdulfattah Jandali, was a Syrian political science professor, and his mother, Joanne Schieble, worked as a speech therapist. Shortly after Steve was placed for adoption, his biological parents married and had another child, Mona Simpson. It was not until Jobs was 27 that he was able to uncover information on his biological parents. As an infant, Steven was adopted by Clara and Paul Jobs and named Steven Paul Jobs. Clara worked as an accountant, and Paul was a Coast Guard veteran and machinist. The family lived in Mountain View, California, within the area that would later become known as Silicon Valley. As a boy, Jobs and his father would work on electronics in the family garage. Paul would show his son how to take apart and reconstruct electronics, a hobby that instilled confidence, tenacity and mechanical prowess in young Jobs. While Jobs was always an intelligent and innovative thinker, his youth was riddled with frustrations over formal schooling. Jobs was a prankster in elementary school, and his fourth-grade teacher needed to bribe him to study. Jobs tested so well, however, that administrators wanted to skip him ahead to high school—a proposal that his parents declined. A few years later, while Jobs was enrolled at Homestead High School (1971), he was introduced to his future partner, Steve Wozniak, through a friend of Wozniak's. Wozniak was attending the University of California, Berkeley, at the time. In a 2007 interview with PC World, Wozniak spoke about why he and Jobs clicked so well: 'We both loved electronics and the way we used to hook up digital chips,' Wozniak said. 'Very few people, especially back then, had any idea what chips were, how they worked and what they could do. I had designed many computers, so I was way ahead of him in electronics and computer design, but we still had common interests. We both had pretty much sort of an independent attitude about things in the world. ...'"
tt = nlps()
sentences = tt.summarize(title, text)
for sentence in sentences:
print sentence
|
mit
|
amitsela/beam
|
sdks/python/apache_beam/io/__init__.py
|
4
|
1583
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A package defining several input sources and output sinks."""
# pylint: disable=wildcard-import
from apache_beam.io.avroio import *
from apache_beam.io.fileio import *
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Sink
from apache_beam.io.iobase import Write
from apache_beam.io.iobase import Writer
from apache_beam.io.textio import *
from apache_beam.io.tfrecordio import *
from apache_beam.io.range_trackers import *
# Protect against environments where the GCP clients library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.gcp.bigquery import *
from apache_beam.io.gcp.pubsub import *
from apache_beam.io.gcp import gcsio
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
|
apache-2.0
|
adjackura/compute-image-tools
|
image_test/configuration/linux/redhat.py
|
3
|
3112
|
#!/usr/bin/env python3
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import generic_distro
import utils
class RedHatTests(generic_distro.GenericDistroTests):
"""
Abstract class. Please use a derived one.
"""
__metaclass__ = abc.ABCMeta
@utils.RetryOnFailure()
def TestPackageInstallation(self):
"""
Network instabilities can lead to errors when fetching the yum repository.
It is worth retrying when that happens.
"""
# install something to test repository sanity
utils.Execute(['yum', '-y', 'install', 'tree'])
# in case it was already installed, ask for reinstall just to be sure
utils.Execute(['yum', '-y', 'reinstall', 'tree'])
def IsPackageInstalled(self, package_name):
# the following command returns zero if package is installed
command = ['yum', 'list', 'installed', package_name]
rc, output = utils.Execute(command, raise_errors=False)
return rc == 0
def TestPackageManagerConfig(self):
command = ['grep', '-r', 'packages.cloud.google.com', '/etc/yum.repos.d/']
utils.Execute(command)
@abc.abstractmethod
def GetYumCronConfig(self):
"""
Return the location of yum-cron configuration on the system and a
configuration dictionary to be checked on
"""
pass
def TestAutomaticSecurityUpdates(self):
# the following command returns zero if package is installed
utils.Execute(['yum', 'list', 'installed', 'yum-cron'])
# service returns zero if service exists and is running
utils.Execute(['service', 'yum-cron', 'status'])
# check yum-cron configuration
# Now this part is, unfortunately, different between RedHat 6 and 7
yum_cron_file, configs = self.GetYumCronConfig()
for key in configs:
command = ['grep', key, yum_cron_file]
rc, output = utils.Execute(command, capture_output=True)
# get clean text after '=' token
cur_value = generic_distro.RemoveCommentAndStrip(
output[output.find('=') + 1:]
)
if configs[key] != cur_value:
raise Exception('Yum-cron config "%s" is "%s" but expected "%s"' % (
key, cur_value, configs[key]))
class RedHat6Tests(RedHatTests):
def GetYumCronConfig(self):
return (
'/etc/sysconfig/yum-cron',
{
'CHECK_ONLY': 'no',
'DOWNLOAD_ONLY': 'no',
}
)
class RedHat7Tests(RedHatTests):
def GetYumCronConfig(self):
return (
'/etc/yum/yum-cron.conf',
{
'download_updates': 'yes',
'apply_updates': 'yes',
}
)
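# Usage sketch (illustrative; assumes the concrete classes above satisfy the
# abstract interface of generic_distro.GenericDistroTests):
#
#     tests = RedHat7Tests()
#     tests.TestPackageInstallation()
#     tests.TestPackageManagerConfig()
#     tests.TestAutomaticSecurityUpdates()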
|
apache-2.0
|
FlymeOS/tools
|
smaliparser/reject.py
|
2
|
9326
|
'''
Created on Jun 5, 2014
@author: tangliuxiang
'''
import Smali
import utils
import re
import sys
import os
import SmaliEntry
import Replace
import SAutoCom
import tempfile
import LibUtils
from formatters.log import Paint
class reject(object):
REJ_BEGIN = '<<<<<<<'
REJ_SPLIT = '======='
REJ_END = '>>>>>>>'
REJ_MULTI_STR = '^%s.*$|^%s.*$|^%s.*$' % (REJ_BEGIN, REJ_SPLIT, REJ_END)
RE_REJ = re.compile(REJ_MULTI_STR, re.M)
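# Example (added for clarity) of the merge-conflict markers RE_REJ matches,
# one tag per line, e.g.:
#     <<<<<<< ours
#     ... local version ...
#     =======
#     ... incoming version ...
#     >>>>>>> theirs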
FLAG_ENTRY_NORMAL = 0
FLAG_ENTRY_REJECT = 1
FLAG_REPLACED_TO_BOARD = 2
'''
classdocs
'''
def __init__(self, rejectFilePath):
self.mASLib = LibUtils.getSmaliLib(utils.AOSP, 1)
self.mBSLib = LibUtils.getSmaliLib(utils.BOSP, 1)
self.mTSLib = LibUtils.getSmaliLib(utils.TARGET, 1)
self.mRejSLib = LibUtils.getOwnLib(rejectFilePath)
self.rejSmali = Smali.Smali(rejectFilePath)
self.mClassName = self.rejSmali.getClassName()
self.aospSmali = self.mASLib.getFormatSmali(self.mClassName)
self.bospSmali = self.mBSLib.getFormatSmali(self.mClassName)
self.targetSmali = self.mTSLib.getFormatSmali(self.mClassName)
self.mFormatList = {}
self.mCanNotReplaceEntry = []
self.parseReject()
def parseReject(self):
for entry in self.rejSmali.getEntryList():
if self.hasReject(entry.getContentStr()):
entry.addFlag(reject.FLAG_ENTRY_REJECT)
def __multiEntryReject__(self):
lastEntry = None
targetSmaliModified = False
for entry in self.rejSmali.getEntryList():
if self.hasReject(entry.getPreContentStr()):
entry.setPreContentStr(self.rmRejectTagLine(entry.getPreContentStr()))
if not self.targetSmali.hasEntry(entry.getType(), entry.getName()):
if not self.isRejectEntry(entry):
idx = -1
if lastEntry is not None:
tEntry = self.targetSmali.getEntry(lastEntry.getType(), lastEntry.getName())
idx = self.targetSmali.getIndex(tEntry) + 1
utils.SLog.ok("\n>>>> Fix reject %s %s in %s: " % (entry.getType(), entry.getName(), self.rejSmali.getPath()))
utils.SLog.ok(" Add %s %s in %s from bosp" % (entry.getType(), entry.getName(), self.getRealTargetPath(self.targetSmali)))
self.targetSmali.addEntry(entry, idx, utils.annotation.getAddToBospPreContent(entry))
targetSmaliModified = True
else:
lastEntry = entry
if targetSmaliModified:
self.targetSmali.out()
self.rejSmali.out()
FIX_EXACT = 0
FIX_LIGHT = 1
FIX_MIDDLE = 2
FIX_HEAVY = 3
def fix(self, level=FIX_EXACT):
if level >= reject.FIX_EXACT:
self.__multiEntryReject__()
if level >= reject.FIX_LIGHT:
# print "replace only which can replace methods"
# print "add missed class"
# print "add can replace method"
if not utils.precheck.shouldIgnore(self.rejSmali):
self.handleLightFix()
else:
utils.SLog.fail(">>>> Failed on fix reject in %s" %(self.getRealTargetPath(self.targetSmali)))
if level >= reject.FIX_MIDDLE:
# print "which can not replace method: use blank"
self.handleMiddleFix()
if level >= reject.FIX_HEAVY:
print "which missed field, replace class"
self.__exit()
def getRejectEntryList(self):
rejectEntryList = []
for entry in self.rejSmali.getEntryList():
if self.isRejectEntry(entry):
rejectEntryList.append(entry)
return rejectEntryList
def getRejectEntry(self, oriEntry):
for entry in self.getRejectEntryList():
if entry.getClassName() == oriEntry.getClassName() \
and entry.getType() == oriEntry.getType() \
and entry.getName() == oriEntry.getName():
return entry
return None
def __exit(self):
hasReject = self.hasReject(self.rejSmali.toString())
if hasReject:
self.rejSmali.out()
if hasReject:
self.rejSmali = Smali.Smali(self.rejSmali.getPath())
self.rejSmali.out(self.getOutRejectFilePath())
def getOutRejectFilePath(self):
start = len(os.path.abspath(utils.REJECT))
rejPath = self.rejSmali.getPath()
return "%s/%s" % (utils.OUT_REJECT, rejPath[start:])
def getRealTargetPath(self, tSmali):
return "%s/smali/%s" % (utils.getJarNameFromPath(tSmali.getPath()), utils.getBaseSmaliPath(tSmali.getPath()))
def replaceEntryToBosp(self, entry):
if (entry.getFlag() & reject.FLAG_REPLACED_TO_BOARD) == 0:
self.mTSLib.replaceEntry(self.mBSLib, entry)
if self.mRejSLib.getSmali(entry.getClassName()) is not None:
self.rejSmali = self.mRejSLib.replaceEntry(self.mBSLib, entry, False, False, True)
rejEntry = self.getRejectEntry(entry)
if rejEntry is not None:
rejEntry.setFlag(reject.FLAG_ENTRY_NORMAL)
entry.addFlag(reject.FLAG_REPLACED_TO_BOARD)
def handleLightFix(self):
target = os.path.relpath(self.rejSmali.getPath(), utils.REJECT)
print " "
print " %s %s" % (Paint.bold("FIX CONFLICTS IN"), target)
for mEntry in self.mBSLib.getEntryList(self.getRejectEntryList()):
(canReplaceEntry, canNotReplaceEntry) = self.mTSLib.getCanReplaceEntry(self.mBSLib, self.rejSmali.getClassName(), [mEntry], False)
if utils.has(canReplaceEntry, mEntry):
print " %s %s" % (Paint.green("[PASS]"), mEntry.getName())
utils.SLog.ok("\n>>>> Fix reject %s %s in %s: " % (mEntry.getType(), mEntry.getName(), self.rejSmali.getPath()))
self.replaceEntryToBosp(mEntry)
for entry in canReplaceEntry:
if entry != mEntry:
self.replaceEntryToBosp(entry)
if len(canNotReplaceEntry) > 0:
self.mCanNotReplaceEntry.extend(canNotReplaceEntry)
if utils.has(canNotReplaceEntry, mEntry):
print " %s %s" % (Paint.red("[FAIL]"), mEntry.getName())
utils.SLog.fail(" %s" % target)
#utils.SLog.fail(" CONFLICTS: %s %s in %s" % (mEntry.getType(), mEntry.getName(), self.getRealTargetPath(self.mTSLib.getSmali(mEntry.getClassName()))))
#utils.SLog.fail(" Can not be replaced by bosp, because of the follows:")
for entry in canNotReplaceEntry:
#utils.SLog.fail(" %s %s in %s" % (entry.getType(), entry.getName(), self.getRealTargetPath(self.mTSLib.getSmali(entry.getClassName()))))
pass
@staticmethod
def missMethod(sLib, entry):
try:
missedMethodsLen = len(sLib.getMissedMethods(entry))
except Exception as e:
utils.SLog.d(e)
return True
return missedMethodsLen > 0
@staticmethod
def missField(sLib, entry):
try:
missedFieldsLen = len(sLib.getMissedFields(entry))
except Exception as e:
utils.SLog.d(e)
return True
return missedFieldsLen > 0
def handleMiddleFix(self):
for entry in self.mCanNotReplaceEntry:
if entry.getType() == SmaliEntry.METHOD:
clsName = entry.getClassName()
tSmali = self.mTSLib.getSmali(clsName)
bSmali = self.mBSLib.getSmali(clsName)
if not tSmali.hasEntry(entry.getType(), entry.getName()):
Replace.Replace.appendBlankEntry(entry, self.mTSLib.getSmali(entry.getClassName()).getPath())
@staticmethod
def isRejectEntry(entry):
return entry.getFlag() & reject.FLAG_ENTRY_REJECT != 0
@staticmethod
def hasReject(string):
if string is None:
return False
if bool(reject.RE_REJ.search(string)):
return True
else:
return False
@staticmethod
def rmRejectTagLine(string):
if string is None:
return string
outString = ""
for line in string.splitlines():
if not reject.hasReject(line):
outString = "%s%s" % (outString, line)
return outString
REJECT_ADVICE = "%s/help/reject_advice" %os.path.dirname(os.path.abspath(__file__))
REJECT_SUCCESS = "%s/help/reject_success" %os.path.dirname(os.path.abspath(__file__))
def fixReject():
utils.annotation.setAction("make autofix")
for rejectFile in utils.getSmaliPathList(utils.REJECT, 2):
reject(rejectFile).fix(reject.FIX_LIGHT)
utils.SLog.setAdviceStr(file(REJECT_ADVICE).read())
utils.SLog.setSuccessStr(file(REJECT_SUCCESS).read())
utils.SLog.conclude()
LibUtils.undoFormat()
if __name__ == "__main__":
fixReject()
|
apache-2.0
|
kapilt/cloud-custodian
|
c7n/resources/shield.py
|
5
|
5811
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.exceptions import ClientError
from botocore.paginate import Paginator
from c7n.actions import BaseAction
from c7n.filters import Filter
from c7n.manager import resources
from c7n.query import QueryResourceManager, RetryPageIterator, TypeInfo
from c7n.utils import local_session, type_schema, get_retry
@resources.register('shield-protection')
class ShieldProtection(QueryResourceManager):
class resource_type(TypeInfo):
service = 'shield'
enum_spec = ('list_protections', 'Protections', None)
id = 'Id'
name = 'Name'
arn = False
@resources.register('shield-attack')
class ShieldAttack(QueryResourceManager):
class resource_type(TypeInfo):
service = 'shield'
enum_spec = ('list_attacks', 'Attacks', None)
detail_spec = (
'describe_attack', 'AttackId', 'AttackId', 'Attack')
name = id = 'AttackId'
date = 'StartTime'
filter_name = 'ResourceArns'
filter_type = 'list'
arn = False
def get_protections_paginator(client):
return Paginator(
client.list_protections,
{'input_token': 'NextToken', 'output_token': 'NextToken', 'result_key': 'Protections'},
client.meta.service_model.operation_model('ListProtections'))
def get_type_protections(client, model):
pager = get_protections_paginator(client)
pager.PAGE_ITERATOR_CLS = RetryPageIterator
try:
protections = pager.paginate().build_full_result().get('Protections', [])
except client.exceptions.ResourceNotFoundException:
# shield is not enabled in the account, so no resources are protected
return []
return [p for p in protections if model.arn_type in p['ResourceArn']]
ShieldRetry = get_retry(('ThrottlingException',))
class IsShieldProtected(Filter):
permissions = ('shield:ListProtections',)
schema = type_schema('shield-enabled', state={'type': 'boolean'})
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client(
'shield', region_name='us-east-1')
protections = get_type_protections(client, self.manager.get_model())
protected_resources = {p['ResourceArn'] for p in protections}
state = self.data.get('state', False)
results = []
for arn, r in zip(self.manager.get_arns(resources), resources):
r['c7n:ShieldProtected'] = shielded = arn in protected_resources
if shielded and state:
results.append(r)
elif not shielded and not state:
results.append(r)
return results
class SetShieldProtection(BaseAction):
"""Enable shield protection on applicable resource.
Setting the `sync` parameter will also clear out stale shield protections
for resources that no longer exist.
"""
permissions = ('shield:CreateProtection', 'shield:ListProtections',)
schema = type_schema(
'set-shield',
state={'type': 'boolean'}, sync={'type': 'boolean'})
def process(self, resources):
client = local_session(self.manager.session_factory).client(
'shield', region_name='us-east-1')
model = self.manager.get_model()
protections = get_type_protections(client, self.manager.get_model())
protected_resources = {p['ResourceArn']: p for p in protections}
state = self.data.get('state', True)
if self.data.get('sync', False):
self.clear_stale(client, protections)
for arn, r in zip(self.manager.get_arns(resources), resources):
if state and arn in protected_resources:
continue
if state is False and arn in protected_resources:
ShieldRetry(
client.delete_protection,
ProtectionId=protected_resources[arn]['Id'])
continue
try:
ShieldRetry(
client.create_protection,
Name=r[model.name], ResourceArn=arn)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceAlreadyExistsException':
continue
raise
def clear_stale(self, client, protections):
# Get all resources unfiltered
resources = self.manager.get_resource_manager(
self.manager.type).resources()
resource_arns = set(self.manager.get_arns(resources))
pmap = {}
# Only process stale resources in region for non global resources.
global_resource = getattr(self.manager.resource_type, 'global_resource', False)
for p in protections:
if not global_resource and self.manager.region not in p['ResourceArn']:
continue
pmap[p['ResourceArn']] = p
# Find any protections for resources that don't exist
stale = set(pmap).difference(resource_arns)
self.log.info("clearing %d stale protections", len(stale))
for s in stale:
ShieldRetry(
client.delete_protection, ProtectionId=pmap[s]['Id'])
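# Usage sketch (illustrative): on resources that register these classes, the
# filter appears as type 'shield-enabled' and the action as type 'set-shield'
# in a custodian policy, e.g. expressed here as Python data:
#
#     policy = {
#         'name': 'enable-shield-on-unprotected',
#         'resource': 'elb',
#         'filters': [{'type': 'shield-enabled', 'state': False}],
#         'actions': [{'type': 'set-shield', 'state': True, 'sync': True}],
#     }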
|
apache-2.0
|
tfrancart/schemaorg
|
lib/rdflib/plugin.py
|
22
|
9342
|
"""
Plugin support for rdf.
There are a number of plugin points for rdf: parser, serializer,
store, query processor, and query result. Plugins can be registered
either through setuptools entry_points or by calling
rdf.plugin.register directly.
If you have a package that uses a setuptools based setup.py you can add the
following to your setup::
entry_points = {
'rdf.plugins.parser': [
'nt = rdf.plugins.parsers.nt:NTParser',
],
'rdf.plugins.serializer': [
'nt = rdf.plugins.serializers.NTSerializer:NTSerializer',
],
}
See the `setuptools dynamic discovery of services and plugins`__ for more
information.
.. __: http://peak.telecommunity.com/DevCenter/setuptools#dynamic-discovery-of-services-and-plugins
"""
from rdflib.store import Store
from rdflib.parser import Parser
from rdflib.serializer import Serializer
from rdflib.query import ResultParser, ResultSerializer, \
Processor, Result, UpdateProcessor
from rdflib.exceptions import Error
__all__ = [
'register', 'get', 'plugins', 'PluginException', 'Plugin', 'PKGPlugin']
entry_points = {'rdf.plugins.store': Store,
'rdf.plugins.serializer': Serializer,
'rdf.plugins.parser': Parser,
'rdf.plugins.resultparser': ResultParser,
'rdf.plugins.resultserializer': ResultSerializer,
'rdf.plugins.queryprocessor': Processor,
'rdf.plugins.queryresult': Result,
'rdf.plugins.updateprocessor': UpdateProcessor
}
_plugins = {}
class PluginException(Error):
pass
class Plugin(object):
def __init__(self, name, kind, module_path, class_name):
self.name = name
self.kind = kind
self.module_path = module_path
self.class_name = class_name
self._class = None
def getClass(self):
if self._class is None:
module = __import__(self.module_path, globals(), locals(), [""])
self._class = getattr(module, self.class_name)
return self._class
class PKGPlugin(Plugin):
def __init__(self, name, kind, ep):
self.name = name
self.kind = kind
self.ep = ep
self._class = None
def getClass(self):
if self._class is None:
self._class = self.ep.load()
return self._class
def register(name, kind, module_path, class_name):
"""
Register the plugin for (name, kind). The module_path and
class_name should be the path to a plugin class.
"""
p = Plugin(name, kind, module_path, class_name)
_plugins[(name, kind)] = p
def get(name, kind):
"""
Return the class for the specified (name, kind). Raises a
PluginException if unable to do so.
"""
try:
p = _plugins[(name, kind)]
except KeyError:
raise PluginException(
"No plugin registered for (%s, %s)" % (name, kind))
return p.getClass()
try:
from pkg_resources import iter_entry_points
except ImportError:
pass # TODO: log a message
else:
# add the plugins specified via pkg_resources' EntryPoints.
for entry_point, kind in entry_points.iteritems():
for ep in iter_entry_points(entry_point):
_plugins[(ep.name, kind)] = PKGPlugin(ep.name, kind, ep)
def plugins(name=None, kind=None):
"""
A generator of the plugins.
Pass in name and kind to filter... else leave None to match all.
"""
for p in _plugins.values():
if (name is None or name == p.name) and (
kind is None or kind == p.kind):
yield p
register(
'default', Store,
'rdflib.plugins.memory', 'IOMemory')
register(
'IOMemory', Store,
'rdflib.plugins.memory', 'IOMemory')
register(
'Auditable', Store,
'rdflib.plugins.stores.auditable', 'AuditableStore')
register(
'Concurrent', Store,
'rdflib.plugins.stores.concurrent', 'ConcurrentStore')
register(
'Sleepycat', Store,
'rdflib.plugins.sleepycat', 'Sleepycat')
register(
'SPARQLStore', Store,
'rdflib.plugins.stores.sparqlstore', 'SPARQLStore')
register(
'SPARQLUpdateStore', Store,
'rdflib.plugins.stores.sparqlstore', 'SPARQLUpdateStore')
register(
'application/rdf+xml', Serializer,
'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
register(
'xml', Serializer,
'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
register(
'text/n3', Serializer,
'rdflib.plugins.serializers.n3', 'N3Serializer')
register(
'n3', Serializer,
'rdflib.plugins.serializers.n3', 'N3Serializer')
register(
'text/turtle', Serializer,
'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
register(
'turtle', Serializer,
'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
register(
'trig', Serializer,
'rdflib.plugins.serializers.trig', 'TrigSerializer')
register(
'application/n-triples', Serializer,
'rdflib.plugins.serializers.nt', 'NTSerializer')
register(
'nt', Serializer,
'rdflib.plugins.serializers.nt', 'NTSerializer')
register(
'pretty-xml', Serializer,
'rdflib.plugins.serializers.rdfxml', 'PrettyXMLSerializer')
register(
'trix', Serializer,
'rdflib.plugins.serializers.trix', 'TriXSerializer')
register(
'application/trix', Serializer,
'rdflib.plugins.serializers.trix', 'TriXSerializer')
register(
"application/n-quads", Serializer,
'rdflib.plugins.serializers.nquads', 'NQuadsSerializer')
register(
"nquads", Serializer,
'rdflib.plugins.serializers.nquads', 'NQuadsSerializer')
register(
'application/rdf+xml', Parser,
'rdflib.plugins.parsers.rdfxml', 'RDFXMLParser')
register(
'xml', Parser,
'rdflib.plugins.parsers.rdfxml', 'RDFXMLParser')
register(
'text/n3', Parser,
'rdflib.plugins.parsers.notation3', 'N3Parser')
register(
'n3', Parser,
'rdflib.plugins.parsers.notation3', 'N3Parser')
register(
'text/turtle', Parser,
'rdflib.plugins.parsers.notation3', 'TurtleParser')
register(
'turtle', Parser,
'rdflib.plugins.parsers.notation3', 'TurtleParser')
register(
'application/n-triples', Parser,
'rdflib.plugins.parsers.nt', 'NTParser')
register(
'nt', Parser,
'rdflib.plugins.parsers.nt', 'NTParser')
register(
'application/n-quads', Parser,
'rdflib.plugins.parsers.nquads', 'NQuadsParser')
register(
'nquads', Parser,
'rdflib.plugins.parsers.nquads', 'NQuadsParser')
register(
'application/trix', Parser,
'rdflib.plugins.parsers.trix', 'TriXParser')
register(
'trix', Parser,
'rdflib.plugins.parsers.trix', 'TriXParser')
register(
'trig', Parser,
'rdflib.plugins.parsers.trig', 'TrigParser')
# The basic parsers: RDFa (by default, 1.1),
# microdata, and embedded turtle (a.k.a. hturtle)
register(
'hturtle', Parser,
'rdflib.plugins.parsers.hturtle', 'HTurtleParser')
register(
'rdfa', Parser,
'rdflib.plugins.parsers.structureddata', 'RDFaParser')
register(
'mdata', Parser,
'rdflib.plugins.parsers.structureddata', 'MicrodataParser')
register(
'microdata', Parser,
'rdflib.plugins.parsers.structureddata', 'MicrodataParser')
# A convenience to use the RDFa 1.0 syntax (although the parse method can
# be invoked with an rdfa_version keyword, too)
register(
'rdfa1.0', Parser,
'rdflib.plugins.parsers.structureddata', 'RDFa10Parser')
# Just for the completeness, if the user uses this
register(
'rdfa1.1', Parser,
'rdflib.plugins.parsers.structureddata', 'RDFaParser')
# An HTML file may contain both microdata, rdfa, or turtle. If the user
# wants them all, the parser below simply invokes all:
register(
'html', Parser,
'rdflib.plugins.parsers.structureddata', 'StructuredDataParser')
# Some media types are also bound to RDFa
register(
'application/svg+xml', Parser,
'rdflib.plugins.parsers.structureddata', 'RDFaParser')
register(
'application/xhtml+xml', Parser,
'rdflib.plugins.parsers.structureddata', 'RDFaParser')
# 'text/html' media type should be equivalent to html:
register(
'text/html', Parser,
'rdflib.plugins.parsers.structureddata', 'StructuredDataParser')
register(
'sparql', Result,
'rdflib.plugins.sparql.processor', 'SPARQLResult')
register(
'sparql', Processor,
'rdflib.plugins.sparql.processor', 'SPARQLProcessor')
register(
'sparql', UpdateProcessor,
'rdflib.plugins.sparql.processor', 'SPARQLUpdateProcessor')
register(
'xml', ResultSerializer,
'rdflib.plugins.sparql.results.xmlresults', 'XMLResultSerializer')
register(
'txt', ResultSerializer,
'rdflib.plugins.sparql.results.txtresults', 'TXTResultSerializer')
register(
'json', ResultSerializer,
'rdflib.plugins.sparql.results.jsonresults', 'JSONResultSerializer')
register(
'csv', ResultSerializer,
'rdflib.plugins.sparql.results.csvresults', 'CSVResultSerializer')
register(
'xml', ResultParser,
'rdflib.plugins.sparql.results.xmlresults', 'XMLResultParser')
register(
'json', ResultParser,
'rdflib.plugins.sparql.results.jsonresults', 'JSONResultParser')
register(
'csv', ResultParser,
'rdflib.plugins.sparql.results.csvresults', 'CSVResultParser')
register(
'tsv', ResultParser,
'rdflib.plugins.sparql.results.tsvresults', 'TSVResultParser')
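# Usage sketch (standard rdflib plugin API, shown for illustration): look up
# one of the serializers registered above by (name, kind):
#
#     from rdflib import plugin
#     from rdflib.serializer import Serializer
#
#     TurtleSerializer = plugin.get('turtle', Serializer)
#     print(TurtleSerializer)  # -> the class registered for ('turtle', Serializer)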
|
apache-2.0
|
robwarm/gpaw-symm
|
gpaw/test/td_na2.py
|
1
|
2589
|
from ase import Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.tddft import TDDFT, photoabsorption_spectrum, \
LinearAbsorbingBoundary, P4AbsorbingBoundary, PML
from gpaw.test import equal
import os
# Sodium dimer, Na2
d = 1.5
atoms = Atoms(symbols='Na2',
positions=[( 0, 0, d),
( 0, 0,-d)],
pbc=False)
# Calculate ground state for TDDFT
# Larger box
atoms.center(vacuum=6.0)
# Larger grid spacing, LDA is ok
gs_calc = GPAW(nbands=1, h=0.35, xc='LDA', setups={'Na': '1'})
atoms.set_calculator(gs_calc)
e = atoms.get_potential_energy()
niter = gs_calc.get_number_of_iterations()
gs_calc.write('na2_gs.gpw', 'all')
# 16 fs run with 8.0 attosec time step
time_step = 8.0 # 8.0 as (1 as = 0.041341 autime)
iters = 10 # 2000 x 8 as => 16 fs
# Weak delta kick to z-direction
kick = [0,0,1e-3]
# TDDFT calculator
td_calc = TDDFT('na2_gs.gpw')
# Kick
td_calc.absorption_kick(kick)
# Propagate
td_calc.propagate(time_step, iters, 'na2_dmz.dat', 'na2_td.gpw')
# Linear absorption spectrum
photoabsorption_spectrum('na2_dmz.dat', 'na2_spectrum_z.dat', width=0.3)
iters = 3
# test restart
td_rest = TDDFT('na2_td.gpw')
td_rest.propagate(time_step, iters, 'na2_dmz2.dat', 'na2_td2.gpw')
# test restart
td_rest = TDDFT('na2_td.gpw', solver='BiCGStab')
td_rest.propagate(time_step, iters, 'na2_dmz3.dat', 'na2_td3.gpw')
# test absorbing boundary conditions
# linear imaginary potential
td_ipabs = TDDFT('na2_td.gpw')
ip_abc = LinearAbsorbingBoundary(5.0, 0.01, atoms.positions)
td_ipabs.set_absorbing_boundary(ip_abc)
td_ipabs.propagate(time_step, iters, 'na2_dmz4.dat', 'na2_td4.gpw')
# 4th order polynomial (1-(x^2-1)^2) imaginary potential
td_ip4abs = TDDFT('na2_td.gpw')
ip4_abc = P4AbsorbingBoundary(5.0, 0.03, atoms.positions, 3.0)
td_ip4abs.set_absorbing_boundary(ip4_abc)
td_ip4abs.propagate(time_step, iters, 'na2_dmz5.dat', 'na2_td5.gpw')
# perfectly matched layers
td_pmlabs = TDDFT('na2_td.gpw', solver='BiCGStab')
pml_abc = PML(100.0, 0.1)
td_pmlabs.set_absorbing_boundary(pml_abc)
td_pmlabs.propagate(time_step, iters, 'na2_dmz6.dat', 'na2_td6.gpw')
# photoabsorption_spectrum('na2_dmz2.dat', 'na2_spectrum_z2.dat', width=0.3)
#os.remove('na2_gs.gpw')
#os.remove('na2_td.gpw')
#os.remove('na2_dmz.dat')
#os.remove('na2_spectrum_z.dat')
#os.remove('na2_td2.gpw')
#os.remove('na2_dmz2.dat')
# os.remove('na2_spectrum_z2.dat')
#energy_tolerance = 0.0001
#niter_tolerance = 0
#equal(e, -1.24941356939, energy_tolerance) # svnversion 5252
#equal(niter, 21, niter_tolerance) # svnversion 5252
|
gpl-3.0
|
fabrique/django-sortedm2m
|
test_project/settings.py
|
1
|
3023
|
# Django settings for testsite project.
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'db.sqlite'),
},
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'define in local settings file'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.staticfiles',
'sortedm2m',
'sortedm2m_tests',
'sortedm2m_tests.migrations_tests',
'sortedm2m_tests.altersortedmanytomanyfield_tests',
'example.testapp',
)
MIGRATION_MODULES = {
'migrations_tests': 'sortedm2m_tests.migrations_tests.django17_migrations',
'altersortedmanytomanyfield_tests': 'sortedm2m_tests.altersortedmanytomanyfield_tests.django17_migrations',
}
import django
if django.VERSION >= (1, 6):
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Only test south for django versions lower than 1.7
# 1.7 introduced its own migrations framework
if django.VERSION < (1, 7):
INSTALLED_APPS = INSTALLED_APPS + (
'south',
'test_south_support',
'test_south_support.south_support_new_model',
'test_south_support.south_support_new_field',
'test_south_support.south_support_custom_sort_field_name',
)
try:
from local_settings import *
except ImportError:
pass
|
bsd-3-clause
|
showell/zulip
|
tools/lib/test_server.py
|
1
|
3491
|
import os
import subprocess
import sys
import time
from contextlib import contextmanager
from typing import Iterator, Optional
# Verify the Zulip venv is available.
from tools.lib import sanity_check
sanity_check.check_venv(__file__)
import django
import requests
MAX_SERVER_WAIT = 180
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if TOOLS_DIR not in sys.path:
sys.path.insert(0, os.path.dirname(TOOLS_DIR))
from scripts.lib.zulip_tools import get_or_create_dev_uuid_var_path
from zerver.lib.test_fixtures import update_test_databases_if_required
def set_up_django(external_host: str) -> None:
os.environ['FULL_STACK_ZULIP_TEST'] = '1'
os.environ['EXTERNAL_HOST'] = external_host
os.environ["LOCAL_UPLOADS_DIR"] = get_or_create_dev_uuid_var_path(
'test-backend/test_uploads')
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
django.setup()
os.environ['PYTHONUNBUFFERED'] = 'y'
def assert_server_running(server: "subprocess.Popen[bytes]", log_file: Optional[str]) -> None:
"""Get the exit code of the server, or None if it is still running."""
if server.poll() is not None:
message = 'Server died unexpectedly!'
if log_file:
message += f'\nSee {log_file}\n'
raise RuntimeError(message)
def server_is_up(server: "subprocess.Popen[bytes]", log_file: Optional[str]) -> bool:
assert_server_running(server, log_file)
try:
# We could get a 501 error if the reverse proxy is up but the Django app isn't.
# Note that zulipdev.com is mapped via DNS to 127.0.0.1.
return requests.get('http://zulipdev.com:9981/accounts/home').status_code == 200
except requests.RequestException:
return False
@contextmanager
def test_server_running(force: bool=False, external_host: str='testserver',
log_file: Optional[str]=None, dots: bool=False,
) -> Iterator[None]:
log = sys.stdout
if log_file:
if os.path.exists(log_file) and os.path.getsize(log_file) < 100000:
log = open(log_file, 'a')
log.write('\n\n')
else:
log = open(log_file, 'w')
set_up_django(external_host)
update_test_databases_if_required(rebuild_test_database=True)
# Run this not through the shell, so that we have the actual PID.
run_dev_server_command = ['tools/run-dev.py', '--test', '--streamlined']
if force:
run_dev_server_command.append('--force')
server = subprocess.Popen(run_dev_server_command,
stdout=log, stderr=log)
try:
# Wait for the server to start up.
sys.stdout.write('\nWaiting for test server (may take a while)')
if not dots:
sys.stdout.write('\n\n')
t = time.time()
while not server_is_up(server, log_file):
if dots:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.4)
if time.time() - t > MAX_SERVER_WAIT:
raise Exception('Timeout waiting for server')
sys.stdout.write('\n\n--- SERVER IS UP! ---\n\n')
# DO OUR ACTUAL TESTING HERE!!!
yield
finally:
assert_server_running(server, log_file)
server.terminate()
server.wait()
if __name__ == '__main__':
# The code below is for testing this module works
with test_server_running():
print('\n\n SERVER IS UP!\n\n')
|
apache-2.0
|
40223148/2015cd_midterm
|
static/Brython3.1.0-20150301-090019/Lib/_strptime.py
|
518
|
21683
|
"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import (date as datetime_date,
timedelta as datetime_timedelta,
timezone as datetime_timezone)
try:
from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
The locale language is set at the outset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
Only other possible issue is if someone changed the timezone and did
not call time.tzset(). That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
# Add '' to seq to either the front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (1, 22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == time.tzname[1]
# and time.daylight; handle that in strptime.
#try:
#time.tzset()
#except AttributeError:
#pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super()
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,6})",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'z': r"(?P<z>[+-]\d\d[0-5]\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
prevents the possibility of a match occurring for a value that also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile('\s+')
format = whitespace_replacement.sub('\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
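# Worked example (added for clarity): 2005-01-01 is a Saturday, so for %U
# (weeks start on Sunday) first_weekday becomes (5 + 1) % 7 == 6 and
# week_0_length == (7 - 6) % 7 == 1. Sunday 2005-01-02 is then week 1, day 0:
# julian == 1 + (1 + 7 * 0) + 0 == 2, i.e. the second day of the year.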
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a 2-tuple consisting of a time struct and an int containing
the number of microseconds based on the input string and the
format string."""
for index, arg in enumerate([data_string, format]):
if not isinstance(arg, str):
msg = "strptime() argument {} must be str, not {}"
raise TypeError(msg.format(index, type(arg)))
global _TimeRE_cache, _regex_cache
with _cache_lock:
if _getlang() != _TimeRE_cache.locale_time.lang:
_TimeRE_cache = TimeRE()
_regex_cache.clear()
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError as err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format)) from None
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format) from None
_regex_cache[format] = format_regex
found = format_regex.match(data_string)
if not found:
raise ValueError("time data %r does not match format %r" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = None
month = day = 1
hour = minute = second = fraction = 0
tz = -1
tzoffset = None
# Default to -1 to signify that values not known; not critical to have,
# though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.keys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
#value in the range of [00, 68] is in the century 2000, while
#[69,99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
# Pad to always return microseconds.
s += "0" * (6 - len(s))
fraction = int(s)
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'z':
z = found_dict['z']
tzoffset = int(z[1:3]) * 60 + int(z[3:5])
if z.startswith("-"):
tzoffset = -tzoffset
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
leap_year_fix = False
if year is None and month == 2 and day == 29:
year = 1904 # 1904 is first leap year of 20th century
leap_year_fix = True
elif year is None:
year = 1900
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian == -1 and week_of_year != -1 and weekday != -1:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the week
# calculation.
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
# Add timezone info
tzname = found_dict.get("Z")
if tzoffset is not None:
gmtoff = tzoffset * 60
else:
gmtoff = None
if leap_year_fix:
# the caller didn't supply a year but asked for Feb 29th. We couldn't
# use the default of 1900 for computations. We set it back to ensure
# that February 29th is smaller than March 1st.
year = 1900
return (year, month, day,
hour, minute, second,
weekday, julian, tz, tzname, gmtoff), fraction
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the
format string."""
tt = _strptime(data_string, format)[0]
return time.struct_time(tt[:time._STRUCT_TM_ITEMS])
def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a class cls instance based on the input string and the
format string."""
tt, fraction = _strptime(data_string, format)
tzname, gmtoff = tt[-2:]
args = tt[:6] + (fraction,)
if gmtoff is not None:
tzdelta = datetime_timedelta(seconds=gmtoff)
if tzname:
tz = datetime_timezone(tzdelta, tzname)
else:
tz = datetime_timezone(tzdelta)
args += (tz,)
return cls(*args)
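# A minimal usage sketch (not part of the original module) exercising the
# %I/%p branch of _strptime above: "12 AM" maps to hour 0, "12 PM" stays at
# hour 12, and other PM hours have 12 added.
if __name__ == "__main__":
    from datetime import datetime as _example_datetime
    assert _strptime_datetime(_example_datetime, "12:05 AM", "%I:%M %p").hour == 0
    assert _strptime_datetime(_example_datetime, "12:05 PM", "%I:%M %p").hour == 12
    assert _strptime_datetime(_example_datetime, "02:30 PM", "%I:%M %p").hour == 14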
|
gpl-3.0
|
morenopc/edx-platform
|
common/djangoapps/track/tests/test_middleware.py
|
11
|
4977
|
import re
from mock import patch
from mock import sentinel
from django.contrib.auth.models import User
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from eventtracking import tracker
from track.middleware import TrackMiddleware
class TrackMiddlewareTestCase(TestCase):
def setUp(self):
self.track_middleware = TrackMiddleware()
self.request_factory = RequestFactory()
patcher = patch('track.views.server_track')
self.mock_server_track = patcher.start()
self.addCleanup(patcher.stop)
def test_normal_request(self):
request = self.request_factory.get('/somewhere')
self.track_middleware.process_request(request)
self.assertTrue(self.mock_server_track.called)
def test_default_filters_do_not_render_view(self):
for url in ['/event', '/event/1', '/login', '/heartbeat']:
request = self.request_factory.get(url)
self.track_middleware.process_request(request)
self.assertFalse(self.mock_server_track.called)
self.mock_server_track.reset_mock()
@override_settings(TRACKING_IGNORE_URL_PATTERNS=[])
def test_reading_filtered_urls_from_settings(self):
request = self.request_factory.get('/event')
self.track_middleware.process_request(request)
self.assertTrue(self.mock_server_track.called)
@override_settings(TRACKING_IGNORE_URL_PATTERNS=[r'^/some/excluded.*'])
def test_anchoring_of_patterns_at_beginning(self):
request = self.request_factory.get('/excluded')
self.track_middleware.process_request(request)
self.assertTrue(self.mock_server_track.called)
self.mock_server_track.reset_mock()
request = self.request_factory.get('/some/excluded/url')
self.track_middleware.process_request(request)
self.assertFalse(self.mock_server_track.called)
def test_default_request_context(self):
context = self.get_context_for_path('/courses/')
self.assertEquals(context, {
'user_id': '',
'session': '',
'username': '',
'ip': '127.0.0.1',
'host': 'testserver',
'agent': '',
'path': '/courses/',
'org_id': '',
'course_id': '',
})
def get_context_for_path(self, path):
"""Extract the generated event tracking context for a given request for the given path."""
request = self.request_factory.get(path)
return self.get_context_for_request(request)
def get_context_for_request(self, request):
"""Extract the generated event tracking context for the given request."""
self.track_middleware.process_request(request)
try:
captured_context = tracker.get_tracker().resolve_context()
finally:
self.track_middleware.process_response(request, None)
self.assertEquals(
tracker.get_tracker().resolve_context(),
{}
)
return captured_context
def test_request_in_course_context(self):
captured_context = self.get_context_for_path('/courses/test_org/test_course/test_run/foo')
expected_context_subset = {
'course_id': 'test_org/test_course/test_run',
'org_id': 'test_org',
}
self.assert_dict_subset(captured_context, expected_context_subset)
def assert_dict_subset(self, superset, subset):
"""Assert that the superset dict contains all of the key-value pairs found in the subset dict."""
for key, expected_value in subset.iteritems():
self.assertEquals(superset[key], expected_value)
def test_request_with_user(self):
user_id = 1
username = sentinel.username
request = self.request_factory.get('/courses/')
request.user = User(pk=user_id, username=username)
context = self.get_context_for_request(request)
self.assert_dict_subset(context, {
'user_id': user_id,
'username': username,
})
def test_request_with_session(self):
request = self.request_factory.get('/courses/')
SessionMiddleware().process_request(request)
request.session.save()
session_key = request.session.session_key
context = self.get_context_for_request(request)
self.assert_dict_subset(context, {
'session': session_key,
})
def test_request_headers(self):
ip_address = '10.0.0.0'
user_agent = 'UnitTest/1.0'
factory = RequestFactory(REMOTE_ADDR=ip_address, HTTP_USER_AGENT=user_agent)
request = factory.get('/some-path')
context = self.get_context_for_request(request)
self.assert_dict_subset(context, {
'ip': ip_address,
'agent': user_agent,
})
|
agpl-3.0
|
bonitadecker77/python-for-android
|
python3-alpha/python3-src/Lib/distutils/tests/test_core.py
|
138
|
3027
|
"""Tests for distutils.core."""
import io
import distutils.core
import os
import shutil
import sys
import test.support
from test.support import captured_stdout, run_unittest
import unittest
from distutils.tests import support
# setup script that uses __file__
setup_using___file__ = """\
__file__
from distutils.core import setup
setup()
"""
setup_prints_cwd = """\
import os
print(os.getcwd())
from distutils.core import setup
setup()
"""
class CoreTestCase(support.EnvironGuard, unittest.TestCase):
def setUp(self):
super(CoreTestCase, self).setUp()
self.old_stdout = sys.stdout
self.cleanup_testfn()
self.old_argv = sys.argv, sys.argv[:]
def tearDown(self):
sys.stdout = self.old_stdout
self.cleanup_testfn()
sys.argv = self.old_argv[0]
sys.argv[:] = self.old_argv[1]
super(CoreTestCase, self).tearDown()
def cleanup_testfn(self):
path = test.support.TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def write_setup(self, text, path=test.support.TESTFN):
f = open(path, "w")
try:
f.write(text)
finally:
f.close()
return path
def test_run_setup_provides_file(self):
# Make sure the script can use __file__; if that's missing, the test
# setup.py script will raise NameError.
distutils.core.run_setup(
self.write_setup(setup_using___file__))
def test_run_setup_uses_current_dir(self):
# This tests that the setup script is run with the current directory
# as its own current directory; this was temporarily broken by a
# previous patch when TESTFN did not use the current directory.
sys.stdout = io.StringIO()
cwd = os.getcwd()
# Create a directory and write the setup.py file there:
os.mkdir(test.support.TESTFN)
setup_py = os.path.join(test.support.TESTFN, "setup.py")
distutils.core.run_setup(
self.write_setup(setup_prints_cwd, path=setup_py))
output = sys.stdout.getvalue()
if output.endswith("\n"):
output = output[:-1]
self.assertEqual(cwd, output)
def test_debug_mode(self):
# this covers the code called when DEBUG is set
sys.argv = ['setup.py', '--name']
with captured_stdout() as stdout:
distutils.core.setup(name='bar')
stdout.seek(0)
self.assertEqual(stdout.read(), 'bar\n')
distutils.core.DEBUG = True
try:
with captured_stdout() as stdout:
distutils.core.setup(name='bar')
finally:
distutils.core.DEBUG = False
stdout.seek(0)
wanted = "options (after parsing config files):\n"
self.assertEqual(stdout.readlines()[0], wanted)
def test_suite():
return unittest.makeSuite(CoreTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
apache-2.0
|
Drooids/odoo
|
addons/product_expiry/__init__.py
|
442
|
1053
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_expiry
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Maximilian-Reuter/SickRage-1
|
lib/github/Repository.py
|
21
|
92750
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Christopher Gilbert <[email protected]> #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Adrian Petrescu <[email protected]> #
# Copyright 2013 Mark Roddy <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# Copyright 2015 Jannis Gebauer <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import urllib
import datetime
import github.GithubObject
import github.PaginatedList
import github.Branch
import github.IssueEvent
import github.ContentFile
import github.Label
import github.GitBlob
import github.Organization
import github.GitRef
import github.GitRelease
import github.Issue
import github.Repository
import github.PullRequest
import github.IssueComment
import github.PullRequestComment
import github.RepositoryKey
import github.NamedUser
import github.Milestone
import github.Comparison
import github.CommitComment
import github.GitCommit
import github.Team
import github.Commit
import github.GitTree
import github.Hook
import github.Tag
import github.GitTag
import github.Download
import github.Permissions
import github.Event
import github.Legacy
import github.StatsContributor
import github.StatsCommitActivity
import github.StatsCodeFrequency
import github.StatsParticipation
import github.StatsPunchCard
import github.Stargazer
class Repository(github.GithubObject.CompletableGithubObject):
"""
    This class represents Repositories. The reference can be found here http://developer.github.com/v3/repos/
"""
@property
def archive_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._archive_url)
return self._archive_url.value
@property
def assignees_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._assignees_url)
return self._assignees_url.value
@property
def blobs_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._blobs_url)
return self._blobs_url.value
@property
def branches_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._branches_url)
return self._branches_url.value
@property
def clone_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._clone_url)
return self._clone_url.value
@property
def collaborators_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._collaborators_url)
return self._collaborators_url.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def compare_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._compare_url)
return self._compare_url.value
@property
def contents_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._contents_url)
return self._contents_url.value
@property
def contributors_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._contributors_url)
return self._contributors_url.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def default_branch(self):
"""
:type: string
"""
self._completeIfNotSet(self._default_branch)
return self._default_branch.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def downloads_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._downloads_url)
return self._downloads_url.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def fork(self):
"""
:type: bool
"""
self._completeIfNotSet(self._fork)
return self._fork.value
@property
def forks(self):
"""
:type: integer
"""
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._forks_count)
return self._forks_count.value
@property
def forks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def full_name(self):
"""
:type: string
"""
self._completeIfNotSet(self._full_name)
return self._full_name.value
@property
def git_commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_commits_url)
return self._git_commits_url.value
@property
def git_refs_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_refs_url)
return self._git_refs_url.value
@property
def git_tags_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_tags_url)
return self._git_tags_url.value
@property
def git_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_url)
return self._git_url.value
@property
def has_downloads(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_downloads)
return self._has_downloads.value
@property
def has_issues(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_issues)
return self._has_issues.value
@property
def has_wiki(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_wiki)
return self._has_wiki.value
@property
def homepage(self):
"""
:type: string
"""
self._completeIfNotSet(self._homepage)
return self._homepage.value
@property
def hooks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._hooks_url)
return self._hooks_url.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue_comment_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issue_comment_url)
return self._issue_comment_url.value
@property
def issue_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issue_events_url)
return self._issue_events_url.value
@property
def issues_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issues_url)
return self._issues_url.value
@property
def keys_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._keys_url)
return self._keys_url.value
@property
def labels_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._labels_url)
return self._labels_url.value
@property
def language(self):
"""
:type: string
"""
self._completeIfNotSet(self._language)
return self._language.value
@property
def languages_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._languages_url)
return self._languages_url.value
@property
def master_branch(self):
"""
:type: string
"""
self._completeIfNotSet(self._master_branch)
return self._master_branch.value
@property
def merges_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._merges_url)
return self._merges_url.value
@property
def milestones_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._milestones_url)
return self._milestones_url.value
@property
def mirror_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._mirror_url)
return self._mirror_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def network_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._network_count)
return self._network_count.value
@property
def notifications_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._notifications_url)
return self._notifications_url.value
@property
def open_issues(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues)
return self._open_issues.value
@property
def open_issues_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues_count)
return self._open_issues_count.value
@property
def organization(self):
"""
:type: :class:`github.Organization.Organization`
"""
self._completeIfNotSet(self._organization)
return self._organization.value
@property
def owner(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def parent(self):
"""
:type: :class:`github.Repository.Repository`
"""
self._completeIfNotSet(self._parent)
return self._parent.value
@property
def permissions(self):
"""
:type: :class:`github.Permissions.Permissions`
"""
self._completeIfNotSet(self._permissions)
return self._permissions.value
@property
def private(self):
"""
:type: bool
"""
self._completeIfNotSet(self._private)
return self._private.value
@property
def pulls_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._pulls_url)
return self._pulls_url.value
@property
def pushed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._pushed_at)
return self._pushed_at.value
@property
def size(self):
"""
:type: integer
"""
self._completeIfNotSet(self._size)
return self._size.value
@property
def source(self):
"""
:type: :class:`github.Repository.Repository`
"""
self._completeIfNotSet(self._source)
return self._source.value
@property
def ssh_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._ssh_url)
return self._ssh_url.value
@property
def stargazers_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._stargazers_count) # pragma no cover (Should be covered)
return self._stargazers_count.value # pragma no cover (Should be covered)
@property
def stargazers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._stargazers_url)
return self._stargazers_url.value
@property
def statuses_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._statuses_url)
return self._statuses_url.value
@property
def subscribers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscribers_url)
return self._subscribers_url.value
@property
def subscription_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscription_url)
return self._subscription_url.value
@property
def svn_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._svn_url)
return self._svn_url.value
@property
def tags_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._tags_url)
return self._tags_url.value
@property
def teams_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._teams_url)
return self._teams_url.value
@property
def trees_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._trees_url)
return self._trees_url.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def watchers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._watchers)
return self._watchers.value
@property
def watchers_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._watchers_count)
return self._watchers_count.value
def add_to_collaborators(self, collaborator):
"""
:calls: `PUT /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/collaborators/" + collaborator
)
def compare(self, base, head):
"""
:calls: `GET /repos/:owner/:repo/compare/:base...:head <http://developer.github.com/v3/repos/commits>`_
:param base: string
:param head: string
:rtype: :class:`github.Comparison.Comparison`
"""
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/compare/" + base + "..." + head
)
return github.Comparison.Comparison(self._requester, headers, data, completed=True)
def create_git_blob(self, content, encoding):
"""
:calls: `POST /repos/:owner/:repo/git/blobs <http://developer.github.com/v3/git/blobs>`_
:param content: string
:param encoding: string
:rtype: :class:`github.GitBlob.GitBlob`
"""
assert isinstance(content, (str, unicode)), content
assert isinstance(encoding, (str, unicode)), encoding
post_parameters = {
"content": content,
"encoding": encoding,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/blobs",
input=post_parameters
)
return github.GitBlob.GitBlob(self._requester, headers, data, completed=True)
def create_git_commit(self, message, tree, parents, author=github.GithubObject.NotSet, committer=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/commits <http://developer.github.com/v3/git/commits>`_
:param message: string
:param tree: :class:`github.GitTree.GitTree`
:param parents: list of :class:`github.GitCommit.GitCommit`
:param author: :class:`github.InputGitAuthor.InputGitAuthor`
:param committer: :class:`github.InputGitAuthor.InputGitAuthor`
:rtype: :class:`github.GitCommit.GitCommit`
"""
assert isinstance(message, (str, unicode)), message
assert isinstance(tree, github.GitTree.GitTree), tree
assert all(isinstance(element, github.GitCommit.GitCommit) for element in parents), parents
assert author is github.GithubObject.NotSet or isinstance(author, github.InputGitAuthor), author
assert committer is github.GithubObject.NotSet or isinstance(committer, github.InputGitAuthor), committer
post_parameters = {
"message": message,
"tree": tree._identity,
"parents": [element._identity for element in parents],
}
if author is not github.GithubObject.NotSet:
post_parameters["author"] = author._identity
if committer is not github.GithubObject.NotSet:
post_parameters["committer"] = committer._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/commits",
input=post_parameters
)
return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
def create_git_ref(self, ref, sha):
"""
:calls: `POST /repos/:owner/:repo/git/refs <http://developer.github.com/v3/git/refs>`_
:param ref: string
:param sha: string
:rtype: :class:`github.GitRef.GitRef`
"""
assert isinstance(ref, (str, unicode)), ref
assert isinstance(sha, (str, unicode)), sha
post_parameters = {
"ref": ref,
"sha": sha,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/refs",
input=post_parameters
)
return github.GitRef.GitRef(self._requester, headers, data, completed=True)
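    # Usage sketch (illustrative; "repo" is a hypothetical Repository
    # instance): the low-level Git-object methods in this class chain
    # together to build a commit from scratch:
    #
    #     blob = repo.create_git_blob("hello\n", "utf-8")
    #     element = github.InputGitTreeElement("hello.txt", "100644", "blob", sha=blob.sha)
    #     tree = repo.create_git_tree([element])
    #     parent = repo.get_git_commit(repo.get_branch("master").commit.sha)
    #     commit = repo.create_git_commit("add hello.txt", tree, [parent])
    #     repo.create_git_ref("refs/heads/hello", commit.sha)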
def create_git_tag_and_release(self, tag, tag_message, release_name, release_message, object, type, tagger=github.GithubObject.NotSet, draft=False, prerelease=False):
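        """
        Convenience wrapper: creates the git tag, then a release pointing at it.
        :param tag: string
        :param tag_message: string
        :param release_name: string
        :param release_message: string
        :param object: string
        :param type: string
        :param tagger: :class:`github.InputGitAuthor.InputGitAuthor`
        :param draft: bool
        :param prerelease: bool
        :rtype: :class:`github.GitRelease.GitRelease`
        """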
self.create_git_tag(tag, tag_message, object, type, tagger)
return self.create_git_release(tag, release_name, release_message, draft, prerelease)
def create_git_release(self, tag, name, message, draft=False, prerelease=False):
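        """
        :calls: `POST /repos/:owner/:repo/releases <http://developer.github.com/v3/repos/releases>`_
        :param tag: string
        :param name: string
        :param message: string
        :param draft: bool
        :param prerelease: bool
        :rtype: :class:`github.GitRelease.GitRelease`
        """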
assert isinstance(tag, (str, unicode)), tag
assert isinstance(name, (str, unicode)), name
assert isinstance(message, (str, unicode)), message
assert isinstance(draft, bool), draft
assert isinstance(prerelease, bool), prerelease
post_parameters = {
"tag_name": tag,
"name": name,
"body": message,
"draft": draft,
"prerelease": prerelease,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/releases",
input=post_parameters
)
return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
def create_git_tag(self, tag, message, object, type, tagger=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/tags <http://developer.github.com/v3/git/tags>`_
:param tag: string
:param message: string
:param object: string
:param type: string
:param tagger: :class:`github.InputGitAuthor.InputGitAuthor`
:rtype: :class:`github.GitTag.GitTag`
"""
assert isinstance(tag, (str, unicode)), tag
assert isinstance(message, (str, unicode)), message
assert isinstance(object, (str, unicode)), object
assert isinstance(type, (str, unicode)), type
assert tagger is github.GithubObject.NotSet or isinstance(tagger, github.InputGitAuthor), tagger
post_parameters = {
"tag": tag,
"message": message,
"object": object,
"type": type,
}
if tagger is not github.GithubObject.NotSet:
post_parameters["tagger"] = tagger._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/tags",
input=post_parameters
)
return github.GitTag.GitTag(self._requester, headers, data, completed=True)
def create_git_tree(self, tree, base_tree=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/trees <http://developer.github.com/v3/git/trees>`_
:param tree: list of :class:`github.InputGitTreeElement.InputGitTreeElement`
:param base_tree: :class:`github.GitTree.GitTree`
:rtype: :class:`github.GitTree.GitTree`
"""
assert all(isinstance(element, github.InputGitTreeElement) for element in tree), tree
assert base_tree is github.GithubObject.NotSet or isinstance(base_tree, github.GitTree.GitTree), base_tree
post_parameters = {
"tree": [element._identity for element in tree],
}
if base_tree is not github.GithubObject.NotSet:
post_parameters["base_tree"] = base_tree._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/trees",
input=post_parameters
)
return github.GitTree.GitTree(self._requester, headers, data, completed=True)
def create_hook(self, name, config, events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/hooks <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param active: bool
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/hooks",
input=post_parameters
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
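    # Usage sketch (illustrative): GitHub's common "web" hook type takes its
    # endpoint in the config dict:
    #
    #     repo.create_hook("web", {"url": "https://example.com/hook",
    #                              "content_type": "json"},
    #                      events=["push"], active=True)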
def create_issue(self, title, body=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, milestone=github.GithubObject.NotSet, labels=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/issues <http://developer.github.com/v3/issues>`_
:param title: string
:param body: string
:param assignee: string or :class:`github.NamedUser.NamedUser`
:param milestone: :class:`github.Milestone.Milestone`
:param labels: list of :class:`github.Label.Label`
:rtype: :class:`github.Issue.Issue`
"""
assert isinstance(title, (str, unicode)), title
assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
assert assignee is github.GithubObject.NotSet or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
assert milestone is github.GithubObject.NotSet or isinstance(milestone, github.Milestone.Milestone), milestone
        assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) or isinstance(element, (str, unicode)) for element in labels), labels
post_parameters = {
"title": title,
}
if body is not github.GithubObject.NotSet:
post_parameters["body"] = body
if assignee is not github.GithubObject.NotSet:
if isinstance(assignee, (str, unicode)):
post_parameters["assignee"] = assignee
else:
post_parameters["assignee"] = assignee._identity
if milestone is not github.GithubObject.NotSet:
post_parameters["milestone"] = milestone._identity
if labels is not github.GithubObject.NotSet:
post_parameters["labels"] = [element.name if isinstance(element, github.Label.Label) else element for element in labels]
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/issues",
input=post_parameters
)
return github.Issue.Issue(self._requester, headers, data, completed=True)
def create_key(self, title, key):
"""
:calls: `POST /repos/:owner/:repo/keys <http://developer.github.com/v3/repos/keys>`_
:param title: string
:param key: string
:rtype: :class:`github.RepositoryKey.RepositoryKey`
"""
assert isinstance(title, (str, unicode)), title
assert isinstance(key, (str, unicode)), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/keys",
input=post_parameters
)
return github.RepositoryKey.RepositoryKey(self._requester, headers, data, completed=True, repoUrl=self.url)
def create_label(self, name, color):
"""
:calls: `POST /repos/:owner/:repo/labels <http://developer.github.com/v3/issues/labels>`_
:param name: string
:param color: string
:rtype: :class:`github.Label.Label`
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(color, (str, unicode)), color
post_parameters = {
"name": name,
"color": color,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/labels",
input=post_parameters
)
return github.Label.Label(self._requester, headers, data, completed=True)
def create_milestone(self, title, state=github.GithubObject.NotSet, description=github.GithubObject.NotSet, due_on=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_
:param title: string
:param state: string
:param description: string
:param due_on: date
:rtype: :class:`github.Milestone.Milestone`
"""
assert isinstance(title, (str, unicode)), title
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert due_on is github.GithubObject.NotSet or isinstance(due_on, datetime.date), due_on
post_parameters = {
"title": title,
}
if state is not github.GithubObject.NotSet:
post_parameters["state"] = state
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if due_on is not github.GithubObject.NotSet:
post_parameters["due_on"] = due_on.strftime("%Y-%m-%d")
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/milestones",
input=post_parameters
)
return github.Milestone.Milestone(self._requester, headers, data, completed=True)
def create_pull(self, *args, **kwds):
"""
:calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param title: string
:param body: string
:param issue: :class:`github.Issue.Issue`
:param base: string
:param head: string
:rtype: :class:`github.PullRequest.PullRequest`
"""
if len(args) + len(kwds) == 4:
return self.__create_pull_1(*args, **kwds)
else:
return self.__create_pull_2(*args, **kwds)
def __create_pull_1(self, title, body, base, head):
assert isinstance(title, (str, unicode)), title
assert isinstance(body, (str, unicode)), body
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
return self.__create_pull(title=title, body=body, base=base, head=head)
def __create_pull_2(self, issue, base, head):
assert isinstance(issue, github.Issue.Issue), issue
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
return self.__create_pull(issue=issue._identity, base=base, head=head)
def __create_pull(self, **kwds):
post_parameters = kwds
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/pulls",
input=post_parameters
)
return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
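    # Usage sketch (illustrative): the argument count in create_pull selects
    # the overload, so both call shapes are valid:
    #
    #     repo.create_pull("My title", "Some body", "master", "feature")  # title/body form
    #     repo.create_pull(issue, "master", "feature")                    # from an existing Issue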
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, default_branch=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param default_branch: string
:rtype: None
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert default_branch is github.GithubObject.NotSet or isinstance(default_branch, (str, unicode)), default_branch
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if default_branch is not github.GithubObject.NotSet:
post_parameters["default_branch"] = default_branch
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_archive_link(self, archive_format, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/:archive_format/:ref <http://developer.github.com/v3/repos/contents>`_
:param archive_format: string
:param ref: string
:rtype: string
"""
assert isinstance(archive_format, (str, unicode)), archive_format
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url = self.url + "/" + archive_format
if ref is not github.GithubObject.NotSet:
url += "/" + ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
url
)
return headers["location"]
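    # Note: the API answers this call with a redirect, and the method above
    # returns the "location" header instead of following it. Usage sketch
    # (illustrative): repo.get_archive_link("tarball", ref="master") yields a
    # download URL for the archive.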
def get_assignees(self):
"""
:calls: `GET /repos/:owner/:repo/assignees <http://developer.github.com/v3/issues/assignees>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/assignees",
None
)
def get_branch(self, branch):
"""
:calls: `GET /repos/:owner/:repo/branches/:branch <http://developer.github.com/v3/repos>`_
:param branch: string
:rtype: :class:`github.Branch.Branch`
"""
assert isinstance(branch, (str, unicode)), branch
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/branches/" + branch
)
return github.Branch.Branch(self._requester, headers, data, completed=True)
def get_branches(self):
"""
:calls: `GET /repos/:owner/:repo/branches <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Branch.Branch`
"""
return github.PaginatedList.PaginatedList(
github.Branch.Branch,
self._requester,
self.url + "/branches",
None
)
def get_collaborators(self):
"""
:calls: `GET /repos/:owner/:repo/collaborators <http://developer.github.com/v3/repos/collaborators>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/collaborators",
None
)
def get_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/comments/:id <http://developer.github.com/v3/repos/comments>`_
:param id: integer
:rtype: :class:`github.CommitComment.CommitComment`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/comments/" + str(id)
)
return github.CommitComment.CommitComment(self._requester, headers, data, completed=True)
def get_comments(self):
"""
:calls: `GET /repos/:owner/:repo/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
"""
return github.PaginatedList.PaginatedList(
github.CommitComment.CommitComment,
self._requester,
self.url + "/comments",
None
)
def get_commit(self, sha):
"""
:calls: `GET /repos/:owner/:repo/commits/:sha <http://developer.github.com/v3/repos/commits>`_
:param sha: string
:rtype: :class:`github.Commit.Commit`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/commits/" + sha
)
return github.Commit.Commit(self._requester, headers, data, completed=True)
def get_commits(self, sha=github.GithubObject.NotSet, path=github.GithubObject.NotSet, since=github.GithubObject.NotSet, until=github.GithubObject.NotSet, author=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/commits <http://developer.github.com/v3/repos/commits>`_
:param sha: string
:param path: string
:param since: datetime.datetime
:param until: datetime.datetime
:param author: string or :class:`github.NamedUser.NamedUser` or :class:`github.AuthenticatedUser.AuthenticatedUser`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit`
"""
assert sha is github.GithubObject.NotSet or isinstance(sha, (str, unicode)), sha
assert path is github.GithubObject.NotSet or isinstance(path, (str, unicode)), path
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
assert until is github.GithubObject.NotSet or isinstance(until, datetime.datetime), until
assert author is github.GithubObject.NotSet or isinstance(author, (str, unicode, github.NamedUser.NamedUser, github.AuthenticatedUser.AuthenticatedUser)), author
url_parameters = dict()
if sha is not github.GithubObject.NotSet:
url_parameters["sha"] = sha
if path is not github.GithubObject.NotSet:
url_parameters["path"] = path
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
if until is not github.GithubObject.NotSet:
url_parameters["until"] = until.strftime("%Y-%m-%dT%H:%M:%SZ")
if author is not github.GithubObject.NotSet:
if isinstance(author, (github.NamedUser.NamedUser, github.AuthenticatedUser.AuthenticatedUser)):
url_parameters["author"] = author.login
else:
url_parameters["author"] = author
return github.PaginatedList.PaginatedList(
github.Commit.Commit,
self._requester,
self.url + "/commits",
url_parameters
)
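    # Usage sketch (illustrative): listing recent commits that touch one path
    # by one author, via the filters above:
    #
    #     since = datetime.datetime(2015, 1, 1)
    #     for commit in repo.get_commits(path="README.md", since=since, author="octocat"):
    #         print(commit.sha)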
def get_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
return self.get_file_contents(path, ref)
def get_file_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
assert isinstance(path, (str, unicode)), path
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/contents" + path,
parameters=url_parameters
)
return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
def get_dir_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: list of :class:`github.ContentFile.ContentFile`
"""
assert isinstance(path, (str, unicode)), path
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/contents" + path,
parameters=url_parameters
)
# Handle 302 redirect response
if headers.get('status') == '302 Found' and headers.get('location'):
headers, data = self._requester.requestJsonAndCheck(
"GET",
headers['location'],
parameters=url_parameters
)
        return [
            # Lazy completion only makes sense for files. See discussion here:
            # https://github.com/jacquev6/PyGithub/issues/140#issuecomment-13481130
            github.ContentFile.ContentFile(self._requester, headers, attributes, completed=(attributes["type"] != "file"))
            for attributes in data
        ]
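    # Usage sketch (illustrative): a simple recursive walk built on
    # get_dir_contents, descending whenever an entry's type is "dir". Note
    # the leading "/": the URL is built as "/contents" + path.
    #
    #     def walk(repo, path="/"):
    #         for entry in repo.get_dir_contents(path):
    #             if entry.type == "dir":
    #                 walk(repo, "/" + entry.path)
    #             else:
    #                 print(entry.path)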
def get_contributors(self):
"""
:calls: `GET /repos/:owner/:repo/contributors <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/contributors",
None
)
def get_download(self, id):
"""
:calls: `GET /repos/:owner/:repo/downloads/:id <http://developer.github.com/v3/repos/downloads>`_
:param id: integer
:rtype: :class:`github.Download.Download`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/downloads/" + str(id)
)
return github.Download.Download(self._requester, headers, data, completed=True)
def get_downloads(self):
"""
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
"""
return github.PaginatedList.PaginatedList(
github.Download.Download,
self._requester,
self.url + "/downloads",
None
)
def get_events(self):
"""
:calls: `GET /repos/:owner/:repo/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events",
None
)
def get_forks(self):
"""
:calls: `GET /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
Repository,
self._requester,
self.url + "/forks",
None
)
def get_git_blob(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/blobs/:sha <http://developer.github.com/v3/git/blobs>`_
:param sha: string
:rtype: :class:`github.GitBlob.GitBlob`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/blobs/" + sha
)
return github.GitBlob.GitBlob(self._requester, headers, data, completed=True)
def get_git_commit(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/commits/:sha <http://developer.github.com/v3/git/commits>`_
:param sha: string
:rtype: :class:`github.GitCommit.GitCommit`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/commits/" + sha
)
return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
def get_git_ref(self, ref):
"""
:calls: `GET /repos/:owner/:repo/git/refs/:ref <http://developer.github.com/v3/git/refs>`_
:param ref: string
:rtype: :class:`github.GitRef.GitRef`
"""
prefix = "/git/refs/"
if not self._requester.FIX_REPO_GET_GIT_REF:
prefix = "/git/"
assert isinstance(ref, (str, unicode)), ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + prefix + ref
)
return github.GitRef.GitRef(self._requester, headers, data, completed=True)
def get_git_refs(self):
"""
:calls: `GET /repos/:owner/:repo/git/refs <http://developer.github.com/v3/git/refs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GitRef.GitRef`
"""
return github.PaginatedList.PaginatedList(
github.GitRef.GitRef,
self._requester,
self.url + "/git/refs",
None
)
def get_git_tag(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/tags/:sha <http://developer.github.com/v3/git/tags>`_
:param sha: string
:rtype: :class:`github.GitTag.GitTag`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/tags/" + sha
)
return github.GitTag.GitTag(self._requester, headers, data, completed=True)
def get_git_tree(self, sha, recursive=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/git/trees/:sha <http://developer.github.com/v3/git/trees>`_
:param sha: string
:param recursive: bool
:rtype: :class:`github.GitTree.GitTree`
"""
assert isinstance(sha, (str, unicode)), sha
assert recursive is github.GithubObject.NotSet or isinstance(recursive, bool), recursive
url_parameters = dict()
if recursive is not github.GithubObject.NotSet:
url_parameters["recursive"] = recursive
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/trees/" + sha,
parameters=url_parameters
)
return github.GitTree.GitTree(self._requester, headers, data, completed=True)
def get_hook(self, id):
"""
:calls: `GET /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param id: integer
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/hooks/" + str(id)
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def get_hooks(self):
"""
:calls: `GET /repos/:owner/:repo/hooks <http://developer.github.com/v3/repos/hooks>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Hook.Hook`
"""
return github.PaginatedList.PaginatedList(
github.Hook.Hook,
self._requester,
self.url + "/hooks",
None
)
def get_issue(self, number):
"""
:calls: `GET /repos/:owner/:repo/issues/:number <http://developer.github.com/v3/issues>`_
:param number: integer
:rtype: :class:`github.Issue.Issue`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/issues/" + str(number)
)
return github.Issue.Issue(self._requester, headers, data, completed=True)
def get_issues(self, milestone=github.GithubObject.NotSet, state=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, mentioned=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet, creator=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/issues <http://developer.github.com/v3/issues>`_
:param milestone: :class:`github.Milestone.Milestone` or "none" or "*"
:param state: string
:param assignee: string or :class:`github.NamedUser.NamedUser` or "none" or "*"
:param mentioned: :class:`github.NamedUser.NamedUser`
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:param creator: string or :class:`github.NamedUser.NamedUser`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert milestone is github.GithubObject.NotSet or milestone == "*" or milestone == "none" or isinstance(milestone, github.Milestone.Milestone), milestone
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert assignee is github.GithubObject.NotSet or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
assert mentioned is github.GithubObject.NotSet or isinstance(mentioned, github.NamedUser.NamedUser), mentioned
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
assert creator is github.GithubObject.NotSet or isinstance(creator, github.NamedUser.NamedUser) or isinstance(creator, (str, unicode)), creator
url_parameters = dict()
if milestone is not github.GithubObject.NotSet:
            if isinstance(milestone, (str, unicode)):
url_parameters["milestone"] = milestone
else:
url_parameters["milestone"] = milestone._identity
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if assignee is not github.GithubObject.NotSet:
            if isinstance(assignee, (str, unicode)):
url_parameters["assignee"] = assignee
else:
url_parameters["assignee"] = assignee._identity
if mentioned is not github.GithubObject.NotSet:
url_parameters["mentioned"] = mentioned._identity
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
if creator is not github.GithubObject.NotSet:
            if isinstance(creator, (str, unicode)):
url_parameters["creator"] = creator
else:
url_parameters["creator"] = creator._identity
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
self.url + "/issues",
url_parameters
)
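    # Usage sketch (illustrative): the "*" and "none" string sentinels accepted
    # above allow filtering without a concrete object:
    #
    #     for issue in repo.get_issues(state="open", milestone="none",
    #                                  assignee="*", sort="updated"):
    #         print(issue.number, issue.title)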
def get_issues_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/issues/comments <http://developer.github.com/v3/issues/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.url + "/issues/comments",
url_parameters
)
def get_issues_event(self, id):
"""
:calls: `GET /repos/:owner/:repo/issues/events/:id <http://developer.github.com/v3/issues/events>`_
:param id: integer
:rtype: :class:`github.IssueEvent.IssueEvent`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/issues/events/" + str(id)
)
return github.IssueEvent.IssueEvent(self._requester, headers, data, completed=True)
def get_issues_events(self):
"""
:calls: `GET /repos/:owner/:repo/issues/events <http://developer.github.com/v3/issues/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent`
"""
return github.PaginatedList.PaginatedList(
github.IssueEvent.IssueEvent,
self._requester,
self.url + "/issues/events",
None
)
def get_key(self, id):
"""
:calls: `GET /repos/:owner/:repo/keys/:id <http://developer.github.com/v3/repos/keys>`_
:param id: integer
:rtype: :class:`github.RepositoryKey.RepositoryKey`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/keys/" + str(id)
)
return github.RepositoryKey.RepositoryKey(self._requester, headers, data, completed=True, repoUrl=self.url)
def get_keys(self):
"""
:calls: `GET /repos/:owner/:repo/keys <http://developer.github.com/v3/repos/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.RepositoryKey.RepositoryKey`
"""
return github.PaginatedList.PaginatedList(
lambda requester, headers, data, completed: github.RepositoryKey.RepositoryKey(requester, headers, data, completed, repoUrl=self.url),
self._requester,
self.url + "/keys",
None
)
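    # Design note: unlike the other paginated listings in this class, get_keys
    # wraps the RepositoryKey constructor in a lambda so that every key it
    # instantiates also receives this repository's URL (repoUrl), which
    # RepositoryKey uses to build its own API endpoints.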
def get_label(self, name):
"""
:calls: `GET /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_
:param name: string
:rtype: :class:`github.Label.Label`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/labels/" + urllib.quote(name)
)
return github.Label.Label(self._requester, headers, data, completed=True)
def get_labels(self):
"""
:calls: `GET /repos/:owner/:repo/labels <http://developer.github.com/v3/issues/labels>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label`
"""
return github.PaginatedList.PaginatedList(
github.Label.Label,
self._requester,
self.url + "/labels",
None
)
def get_languages(self):
"""
:calls: `GET /repos/:owner/:repo/languages <http://developer.github.com/v3/repos>`_
:rtype: dict of string to integer
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/languages"
)
return data
def get_milestone(self, number):
"""
:calls: `GET /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_
:param number: integer
:rtype: :class:`github.Milestone.Milestone`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/milestones/" + str(number)
)
return github.Milestone.Milestone(self._requester, headers, data, completed=True)
def get_milestones(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_
:param state: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Milestone.Milestone`
"""
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
url_parameters = dict()
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Milestone.Milestone,
self._requester,
self.url + "/milestones",
url_parameters
)
def get_network_events(self):
"""
:calls: `GET /networks/:owner/:repo/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/networks/" + self.owner.login + "/" + self.name + "/events",
None
)
def get_pull(self, number):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number <http://developer.github.com/v3/pulls>`_
:param number: integer
:rtype: :class:`github.PullRequest.PullRequest`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/pulls/" + str(number)
)
return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
def get_pulls(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, base=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param state: string
:param sort: string
:param direction: string
:param base: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequest.PullRequest`
"""
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert base is github.GithubObject.NotSet or isinstance(base, (str, unicode)), base
url_parameters = dict()
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if base is not github.GithubObject.NotSet:
url_parameters["base"] = base
return github.PaginatedList.PaginatedList(
github.PullRequest.PullRequest,
self._requester,
self.url + "/pulls",
url_parameters
)
def get_pulls_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
return self.get_pulls_review_comments(sort, direction, since)
def get_pulls_review_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
            github.PullRequestComment.PullRequestComment,
self._requester,
self.url + "/pulls/comments",
url_parameters
)
def get_readme(self, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/readme <http://developer.github.com/v3/repos/contents>`_
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/readme",
parameters=url_parameters
)
return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
def get_stargazers(self):
"""
:calls: `GET /repos/:owner/:repo/stargazers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/stargazers",
None
)
def get_stargazers_with_dates(self):
"""
:calls: `GET /repos/:owner/:repo/stargazers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Stargazer.Stargazer`
"""
return github.PaginatedList.PaginatedList(
github.Stargazer.Stargazer,
self._requester,
self.url + "/stargazers",
None,
headers={'Accept': 'application/vnd.github.v3.star+json'}
)
def get_stats_contributors(self):
"""
:calls: `GET /repos/:owner/:repo/stats/contributors <http://developer.github.com/v3/repos/statistics/#get-contributors-list-with-additions-deletions-and-commit-counts>`_
:rtype: None or list of :class:`github.StatsContributor.StatsContributor`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/contributors"
)
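        # Editorial note (not from the GitHub docs quoted above): repository
        # statistics are computed asynchronously, so a freshly queried repo can
        # answer with an empty body, which shows up here as an empty dict and
        # is reported as None.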
if data == {}:
return None
else:
return [
github.StatsContributor.StatsContributor(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_commit_activity(self):
"""
        :calls: `GET /repos/:owner/:repo/stats/commit_activity <http://developer.github.com/v3/repos/statistics/#get-the-last-year-of-commit-activity-data>`_
:rtype: None or list of :class:`github.StatsCommitActivity.StatsCommitActivity`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/commit_activity"
)
if data == {}:
return None
else:
return [
github.StatsCommitActivity.StatsCommitActivity(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_code_frequency(self):
"""
:calls: `GET /repos/:owner/:repo/stats/code_frequency <http://developer.github.com/v3/repos/statistics/#get-the-number-of-additions-and-deletions-per-week>`_
:rtype: None or list of :class:`github.StatsCodeFrequency.StatsCodeFrequency`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/code_frequency"
)
if data == {}:
return None
else:
return [
github.StatsCodeFrequency.StatsCodeFrequency(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_participation(self):
"""
:calls: `GET /repos/:owner/:repo/stats/participation <http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else>`_
:rtype: None or :class:`github.StatsParticipation.StatsParticipation`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/participation"
)
if data == {}:
return None
else:
return github.StatsParticipation.StatsParticipation(self._requester, headers, data, completed=True)
def get_stats_punch_card(self):
"""
:calls: `GET /repos/:owner/:repo/stats/punch_card <http://developer.github.com/v3/repos/statistics/#get-the-number-of-commits-per-hour-in-each-day>`_
:rtype: None or :class:`github.StatsPunchCard.StatsPunchCard`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/punch_card"
)
if data == {}:
return None
else:
return github.StatsPunchCard.StatsPunchCard(self._requester, headers, data, completed=True)
def get_subscribers(self):
"""
:calls: `GET /repos/:owner/:repo/subscribers <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/subscribers",
None
)
def get_tags(self):
"""
:calls: `GET /repos/:owner/:repo/tags <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Tag.Tag`
"""
return github.PaginatedList.PaginatedList(
github.Tag.Tag,
self._requester,
self.url + "/tags",
None
)
def get_releases(self):
"""
:calls: `GET /repos/:owner/:repo/releases <http://developer.github.com/v3/repos>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GitRelease.GitRelease`
"""
return github.PaginatedList.PaginatedList(
github.GitRelease.GitRelease,
self._requester,
self.url + "/releases",
None
)
def get_release(self, id):
"""
        :calls: `GET /repos/:owner/:repo/releases/:id <https://developer.github.com/v3/repos/releases/#get-a-single-release>`_
:param id: int (release id), str (tag name)
:rtype: None or :class:`github.GitRelease.GitRelease`
"""
if isinstance(id, int):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/releases/" + str(id)
)
return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
        elif isinstance(id, (str, unicode)):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/releases/tags/" + id
)
return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
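    # Illustrative usage of get_release (a sketch; the release id and tag name
    # below are hypothetical):
    #   release = repo.get_release(1234567)    # lookup by numeric release id
    #   release = repo.get_release("v1.0.0")   # lookup by tag name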
def get_teams(self):
"""
:calls: `GET /repos/:owner/:repo/teams <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
self.url + "/teams",
None
)
def get_watchers(self):
"""
:calls: `GET /repos/:owner/:repo/watchers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/watchers",
None
)
def has_in_assignees(self, assignee):
"""
:calls: `GET /repos/:owner/:repo/assignees/:assignee <http://developer.github.com/v3/issues/assignees>`_
:param assignee: string or :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
if isinstance(assignee, github.NamedUser.NamedUser):
assignee = assignee._identity
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/assignees/" + assignee
)
return status == 204
def has_in_collaborators(self, collaborator):
"""
:calls: `GET /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/collaborators/" + collaborator
)
return status == 204
def legacy_search_issues(self, state, keyword):
"""
:calls: `GET /legacy/issues/search/:owner/:repository/:state/:keyword <http://developer.github.com/v3/search/legacy>`_
:param state: "open" or "closed"
:param keyword: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert state in ["open", "closed"], state
assert isinstance(keyword, (str, unicode)), keyword
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/legacy/issues/search/" + self.owner.login + "/" + self.name + "/" + state + "/" + urllib.quote(keyword)
)
return [
github.Issue.Issue(self._requester, headers, github.Legacy.convertIssue(element), completed=False)
for element in data["issues"]
]
def merge(self, base, head, commit_message=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/merges <http://developer.github.com/v3/repos/merging>`_
:param base: string
:param head: string
:param commit_message: string
:rtype: :class:`github.Commit.Commit`
"""
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
post_parameters = {
"base": base,
"head": head,
}
if commit_message is not github.GithubObject.NotSet:
post_parameters["commit_message"] = commit_message
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/merges",
input=post_parameters
)
if data is None:
return None
else:
return github.Commit.Commit(self._requester, headers, data, completed=True)
def remove_from_collaborators(self, collaborator):
"""
:calls: `DELETE /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/collaborators/" + collaborator
)
def subscribe_to_hub(self, event, callback, secret=github.GithubObject.NotSet):
"""
:calls: `POST /hub <http://developer.github.com/>`_
:param event: string
:param callback: string
:param secret: string
:rtype: None
"""
return self._hub("subscribe", event, callback, secret)
def unsubscribe_from_hub(self, event, callback):
"""
:calls: `POST /hub <http://developer.github.com/>`_
:param event: string
:param callback: string
:rtype: None
"""
return self._hub("unsubscribe", event, callback, github.GithubObject.NotSet)
def _hub(self, mode, event, callback, secret):
assert isinstance(mode, (str, unicode)), mode
assert isinstance(event, (str, unicode)), event
assert isinstance(callback, (str, unicode)), callback
assert secret is github.GithubObject.NotSet or isinstance(secret, (str, unicode)), secret
post_parameters = {
"hub.mode": mode,
"hub.topic": "https://github.com/" + self.full_name + "/events/" + event,
"hub.callback": callback,
}
if secret is not github.GithubObject.NotSet:
post_parameters["hub.secret"] = secret
headers, output = self._requester.requestMultipartAndCheck(
"POST",
"/hub",
input=post_parameters
)
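    # Illustrative PubSubHubbub usage (a sketch; the event name and callback
    # URL are made up):
    #   repo.subscribe_to_hub("push", "http://example.com/github-hook")
    # which POSTs hub.mode=subscribe and
    # hub.topic=https://github.com/<owner>/<repo>/events/push to /hub.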
@property
def _identity(self):
return self.owner.login + "/" + self.name
def _initAttributes(self):
self._archive_url = github.GithubObject.NotSet
self._assignees_url = github.GithubObject.NotSet
self._blobs_url = github.GithubObject.NotSet
self._branches_url = github.GithubObject.NotSet
self._clone_url = github.GithubObject.NotSet
self._collaborators_url = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._compare_url = github.GithubObject.NotSet
self._contents_url = github.GithubObject.NotSet
self._contributors_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._default_branch = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._downloads_url = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._fork = github.GithubObject.NotSet
self._forks = github.GithubObject.NotSet
self._forks_count = github.GithubObject.NotSet
self._forks_url = github.GithubObject.NotSet
self._full_name = github.GithubObject.NotSet
self._git_commits_url = github.GithubObject.NotSet
self._git_refs_url = github.GithubObject.NotSet
self._git_tags_url = github.GithubObject.NotSet
self._git_url = github.GithubObject.NotSet
self._has_downloads = github.GithubObject.NotSet
self._has_issues = github.GithubObject.NotSet
self._has_wiki = github.GithubObject.NotSet
self._homepage = github.GithubObject.NotSet
self._hooks_url = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._issue_comment_url = github.GithubObject.NotSet
self._issue_events_url = github.GithubObject.NotSet
self._issues_url = github.GithubObject.NotSet
self._keys_url = github.GithubObject.NotSet
self._labels_url = github.GithubObject.NotSet
self._language = github.GithubObject.NotSet
self._languages_url = github.GithubObject.NotSet
self._master_branch = github.GithubObject.NotSet
self._merges_url = github.GithubObject.NotSet
self._milestones_url = github.GithubObject.NotSet
self._mirror_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._network_count = github.GithubObject.NotSet
self._notifications_url = github.GithubObject.NotSet
self._open_issues = github.GithubObject.NotSet
self._open_issues_count = github.GithubObject.NotSet
self._organization = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
self._parent = github.GithubObject.NotSet
self._permissions = github.GithubObject.NotSet
self._private = github.GithubObject.NotSet
self._pulls_url = github.GithubObject.NotSet
self._pushed_at = github.GithubObject.NotSet
self._size = github.GithubObject.NotSet
self._source = github.GithubObject.NotSet
self._ssh_url = github.GithubObject.NotSet
self._stargazers_count = github.GithubObject.NotSet
self._stargazers_url = github.GithubObject.NotSet
self._statuses_url = github.GithubObject.NotSet
self._subscribers_url = github.GithubObject.NotSet
self._subscription_url = github.GithubObject.NotSet
self._svn_url = github.GithubObject.NotSet
self._tags_url = github.GithubObject.NotSet
self._teams_url = github.GithubObject.NotSet
self._trees_url = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._watchers = github.GithubObject.NotSet
self._watchers_count = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "archive_url" in attributes: # pragma no branch
self._archive_url = self._makeStringAttribute(attributes["archive_url"])
if "assignees_url" in attributes: # pragma no branch
self._assignees_url = self._makeStringAttribute(attributes["assignees_url"])
if "blobs_url" in attributes: # pragma no branch
self._blobs_url = self._makeStringAttribute(attributes["blobs_url"])
if "branches_url" in attributes: # pragma no branch
self._branches_url = self._makeStringAttribute(attributes["branches_url"])
if "clone_url" in attributes: # pragma no branch
self._clone_url = self._makeStringAttribute(attributes["clone_url"])
if "collaborators_url" in attributes: # pragma no branch
self._collaborators_url = self._makeStringAttribute(attributes["collaborators_url"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "compare_url" in attributes: # pragma no branch
self._compare_url = self._makeStringAttribute(attributes["compare_url"])
if "contents_url" in attributes: # pragma no branch
self._contents_url = self._makeStringAttribute(attributes["contents_url"])
if "contributors_url" in attributes: # pragma no branch
self._contributors_url = self._makeStringAttribute(attributes["contributors_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "default_branch" in attributes: # pragma no branch
self._default_branch = self._makeStringAttribute(attributes["default_branch"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "downloads_url" in attributes: # pragma no branch
self._downloads_url = self._makeStringAttribute(attributes["downloads_url"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "fork" in attributes: # pragma no branch
self._fork = self._makeBoolAttribute(attributes["fork"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeIntAttribute(attributes["forks"])
if "forks_count" in attributes: # pragma no branch
self._forks_count = self._makeIntAttribute(attributes["forks_count"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "full_name" in attributes: # pragma no branch
self._full_name = self._makeStringAttribute(attributes["full_name"])
if "git_commits_url" in attributes: # pragma no branch
self._git_commits_url = self._makeStringAttribute(attributes["git_commits_url"])
if "git_refs_url" in attributes: # pragma no branch
self._git_refs_url = self._makeStringAttribute(attributes["git_refs_url"])
if "git_tags_url" in attributes: # pragma no branch
self._git_tags_url = self._makeStringAttribute(attributes["git_tags_url"])
if "git_url" in attributes: # pragma no branch
self._git_url = self._makeStringAttribute(attributes["git_url"])
if "has_downloads" in attributes: # pragma no branch
self._has_downloads = self._makeBoolAttribute(attributes["has_downloads"])
if "has_issues" in attributes: # pragma no branch
self._has_issues = self._makeBoolAttribute(attributes["has_issues"])
if "has_wiki" in attributes: # pragma no branch
self._has_wiki = self._makeBoolAttribute(attributes["has_wiki"])
if "homepage" in attributes: # pragma no branch
self._homepage = self._makeStringAttribute(attributes["homepage"])
if "hooks_url" in attributes: # pragma no branch
self._hooks_url = self._makeStringAttribute(attributes["hooks_url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue_comment_url" in attributes: # pragma no branch
self._issue_comment_url = self._makeStringAttribute(attributes["issue_comment_url"])
if "issue_events_url" in attributes: # pragma no branch
self._issue_events_url = self._makeStringAttribute(attributes["issue_events_url"])
if "issues_url" in attributes: # pragma no branch
self._issues_url = self._makeStringAttribute(attributes["issues_url"])
if "keys_url" in attributes: # pragma no branch
self._keys_url = self._makeStringAttribute(attributes["keys_url"])
if "labels_url" in attributes: # pragma no branch
self._labels_url = self._makeStringAttribute(attributes["labels_url"])
if "language" in attributes: # pragma no branch
self._language = self._makeStringAttribute(attributes["language"])
if "languages_url" in attributes: # pragma no branch
self._languages_url = self._makeStringAttribute(attributes["languages_url"])
if "master_branch" in attributes: # pragma no branch
self._master_branch = self._makeStringAttribute(attributes["master_branch"])
if "merges_url" in attributes: # pragma no branch
self._merges_url = self._makeStringAttribute(attributes["merges_url"])
if "milestones_url" in attributes: # pragma no branch
self._milestones_url = self._makeStringAttribute(attributes["milestones_url"])
if "mirror_url" in attributes: # pragma no branch
self._mirror_url = self._makeStringAttribute(attributes["mirror_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "network_count" in attributes: # pragma no branch
self._network_count = self._makeIntAttribute(attributes["network_count"])
if "notifications_url" in attributes: # pragma no branch
self._notifications_url = self._makeStringAttribute(attributes["notifications_url"])
if "open_issues" in attributes: # pragma no branch
self._open_issues = self._makeIntAttribute(attributes["open_issues"])
if "open_issues_count" in attributes: # pragma no branch
self._open_issues_count = self._makeIntAttribute(attributes["open_issues_count"])
if "organization" in attributes: # pragma no branch
self._organization = self._makeClassAttribute(github.Organization.Organization, attributes["organization"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "parent" in attributes: # pragma no branch
self._parent = self._makeClassAttribute(Repository, attributes["parent"])
if "permissions" in attributes: # pragma no branch
self._permissions = self._makeClassAttribute(github.Permissions.Permissions, attributes["permissions"])
if "private" in attributes: # pragma no branch
self._private = self._makeBoolAttribute(attributes["private"])
if "pulls_url" in attributes: # pragma no branch
self._pulls_url = self._makeStringAttribute(attributes["pulls_url"])
if "pushed_at" in attributes: # pragma no branch
self._pushed_at = self._makeDatetimeAttribute(attributes["pushed_at"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "source" in attributes: # pragma no branch
self._source = self._makeClassAttribute(Repository, attributes["source"])
if "ssh_url" in attributes: # pragma no branch
self._ssh_url = self._makeStringAttribute(attributes["ssh_url"])
if "stargazers_count" in attributes: # pragma no branch
self._stargazers_count = self._makeIntAttribute(attributes["stargazers_count"])
if "stargazers_url" in attributes: # pragma no branch
self._stargazers_url = self._makeStringAttribute(attributes["stargazers_url"])
if "statuses_url" in attributes: # pragma no branch
self._statuses_url = self._makeStringAttribute(attributes["statuses_url"])
if "subscribers_url" in attributes: # pragma no branch
self._subscribers_url = self._makeStringAttribute(attributes["subscribers_url"])
if "subscription_url" in attributes: # pragma no branch
self._subscription_url = self._makeStringAttribute(attributes["subscription_url"])
if "svn_url" in attributes: # pragma no branch
self._svn_url = self._makeStringAttribute(attributes["svn_url"])
if "tags_url" in attributes: # pragma no branch
self._tags_url = self._makeStringAttribute(attributes["tags_url"])
if "teams_url" in attributes: # pragma no branch
self._teams_url = self._makeStringAttribute(attributes["teams_url"])
if "trees_url" in attributes: # pragma no branch
self._trees_url = self._makeStringAttribute(attributes["trees_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "watchers" in attributes: # pragma no branch
self._watchers = self._makeIntAttribute(attributes["watchers"])
if "watchers_count" in attributes: # pragma no branch
self._watchers_count = self._makeIntAttribute(attributes["watchers_count"])
|
gpl-3.0
|
raven47git/flask
|
flask/_compat.py
|
22
|
2934
|
# -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_to_string = _identity
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
    # __init__ come back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
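# Illustrative use of with_metaclass (a sketch; Meta and Tagged are made-up
# names, not part of Flask):
#
#     class Meta(type):
#         def __new__(mcs, name, bases, d):
#             d.setdefault('tag', name.lower())
#             return type.__new__(mcs, name, bases, d)
#
#     class Tagged(with_metaclass(Meta, object)):
#         pass
#
#     assert Tagged.tag == 'tagged'  # Meta ran once; no dummy class in the MRO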
# Certain versions of pypy have a bug where clearing the exception stack
# breaks the __exit__ function in a very peculiar way. This is currently
# true for pypy 2.2.1 for instance. The second level of exception blocks
# is necessary because pypy seems to forget to check if an exception
# happened until the next bytecode instruction?
BROKEN_PYPY_CTXMGR_EXIT = False
if hasattr(sys, 'pypy_version_info'):
class _Mgr(object):
def __enter__(self):
return self
def __exit__(self, *args):
sys.exc_clear()
try:
try:
with _Mgr():
raise AssertionError()
except:
raise
except TypeError:
BROKEN_PYPY_CTXMGR_EXIT = True
except AssertionError:
pass
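# Editorial note on the probe above: on affected pypy versions the
# sys.exc_clear() in __exit__ corrupts the in-flight exception, so the bare
# re-raise fails with a TypeError instead of propagating the AssertionError,
# which is exactly what the two except clauses distinguish.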
|
bsd-3-clause
|
willthames/ansible
|
lib/ansible/modules/cloud/webfaction/webfaction_app.py
|
63
|
6444
|
#!/usr/bin/python
#
# Create a Webfaction application using Ansible and the Webfaction API
#
# Valid application types can be found by looking here:
# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
# * Andy Baker
# * Federico Tarantini
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_app
short_description: Add or remove applications on a Webfaction host
description:
- Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the application
required: true
state:
description:
- Whether the application should exist
required: false
choices: ['present', 'absent']
default: "present"
type:
description:
- The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list.
required: true
autostart:
description:
- Whether the app should restart with an autostart.cgi script
required: false
default: "no"
extra_info:
description:
- Any extra parameters required by the app
required: false
default: null
port_open:
description:
            - Whether the port should be opened
required: false
default: false
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
machine:
description:
- The machine name to use (optional for accounts with only one machine)
required: false
'''
EXAMPLES = '''
- name: Create a test app
webfaction_app:
name="my_wsgi_app1"
state=present
type=mod_wsgi35-python27
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
machine={{webfaction_machine}}
'''
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
type = dict(required=True),
autostart = dict(required=False, type='bool', default=False),
extra_info = dict(required=False, default=""),
port_open = dict(required=False, type='bool', default=False),
login_name = dict(required=True),
login_password = dict(required=True, no_log=True),
machine = dict(required=False, default=False),
),
supports_check_mode=True
)
app_name = module.params['name']
app_type = module.params['type']
app_state = module.params['state']
if module.params['machine']:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password'],
module.params['machine']
)
else:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
app_list = webfaction.list_apps(session_id)
app_map = dict([(i['name'], i) for i in app_list])
existing_app = app_map.get(app_name)
result = {}
# Here's where the real stuff happens
if app_state == 'present':
# Does an app with this name already exist?
if existing_app:
if existing_app['type'] != app_type:
module.fail_json(msg="App already exists with different type. Please fix by hand.")
# If it exists with the right type, we don't change it
# Should check other parameters.
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, create the app
result.update(
webfaction.create_app(
session_id, app_name, app_type,
module.boolean(module.params['autostart']),
module.params['extra_info'],
module.boolean(module.params['port_open'])
)
)
elif app_state == 'absent':
# If the app's already not there, nothing changed.
if not existing_app:
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, delete the app
result.update(
webfaction.delete_app(session_id, app_name)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(app_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
ajshastri/pygooglevoice
|
setup.py
|
39
|
1037
|
from distutils.core import setup
README = """Python Google Voice
====================
Joe McCall & Justin Quick
Exposing the Google Voice "API" to the Python language
-------------------------------------------------------
Google Voice for Python allows you to place calls, send SMS, download voicemail, and check the various folders of your Google Voice account.
You can use the Python API or command line script to schedule calls, check for newly received calls/sms, or even sync your recorded voicemails/calls.
Works for Python 2 and Python 3
Full documentation is available up at http://sphinxdoc.github.com/pygooglevoice/
"""
setup(
name = "pygooglevoice",
version = '0.5',
url = 'http://code.google.com/p/pygooglevoice',
author = 'Justin Quick and Joe McCall',
author_email='[email protected], [email protected]',
description = 'Python 2/3 Interface for Google Voice',
long_description = README,
packages = ['googlevoice'],
scripts = ['bin/gvoice','bin/asterisk-gvoice-setup', 'bin/gvi']
)
|
bsd-3-clause
|
dantebarba/docker-media-server
|
plex/Subliminal.bundle/Contents/Libraries/Shared/pysrt/srtfile.py
|
18
|
10501
|
# -*- coding: utf-8 -*-
import os
import sys
import codecs
try:
from collections import UserList
except ImportError:
from UserList import UserList
from itertools import chain
from copy import copy
from pysrt.srtexc import Error
from pysrt.srtitem import SubRipItem
from pysrt.compat import str
BOMS = ((codecs.BOM_UTF32_LE, 'utf_32_le'),
(codecs.BOM_UTF32_BE, 'utf_32_be'),
(codecs.BOM_UTF16_LE, 'utf_16_le'),
(codecs.BOM_UTF16_BE, 'utf_16_be'),
(codecs.BOM_UTF8, 'utf_8'))
CODECS_BOMS = dict((codec, str(bom, codec)) for bom, codec in BOMS)
BIGGER_BOM = max(len(bom) for bom, encoding in BOMS)
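# BOMS is ordered longest-first on purpose: the UTF-32-LE BOM starts with the
# same two bytes as the UTF-16-LE BOM, so _detect_encoding below must test the
# four-byte marks before the two-byte ones.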
class SubRipFile(UserList, object):
"""
SubRip file descriptor.
Provide a pure Python mapping on all metadata.
SubRipFile(items, eol, path, encoding)
items -> list of SubRipItem. Default to [].
eol -> str: end of line character. Default to linesep used in opened file
if any else to os.linesep.
    path -> str: path where file will be saved. To open an existing file see
SubRipFile.open.
encoding -> str: encoding used at file save. Default to utf-8.
"""
ERROR_PASS = 0
ERROR_LOG = 1
ERROR_RAISE = 2
DEFAULT_ENCODING = 'utf_8'
def __init__(self, items=None, eol=None, path=None, encoding='utf-8'):
UserList.__init__(self, items or [])
self._eol = eol
self.path = path
self.encoding = encoding
def _get_eol(self):
return self._eol or os.linesep
def _set_eol(self, eol):
self._eol = self._eol or eol
eol = property(_get_eol, _set_eol)
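    # Note: the eol setter above intentionally keeps the first value assigned
    # (the line ending detected when the source file was read), so later
    # assignments cannot clobber it.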
def slice(self, starts_before=None, starts_after=None, ends_before=None,
ends_after=None):
"""
slice([starts_before][, starts_after][, ends_before][, ends_after]) \
-> SubRipFile clone
All arguments are optional, and should be coercible to SubRipTime
object.
        It reduces the set of subtitles to those that match the given time
        constraints.
The returned set is a clone, but still contains references to original
subtitles. So if you shift this returned set, subs contained in the
original SubRipFile instance will be altered too.
Example:
>>> subs.slice(ends_after={'seconds': 20}).shift(seconds=2)
"""
clone = copy(self)
if starts_before:
clone.data = (i for i in clone.data if i.start < starts_before)
if starts_after:
clone.data = (i for i in clone.data if i.start > starts_after)
if ends_before:
clone.data = (i for i in clone.data if i.end < ends_before)
if ends_after:
clone.data = (i for i in clone.data if i.end > ends_after)
clone.data = list(clone.data)
return clone
def at(self, timestamp=None, **kwargs):
"""
at(timestamp) -> SubRipFile clone
        timestamp argument should be coercible to a SubRipTime object.
        A specialization of slice. Return all subtitles visible at the
        timestamp mark.
Example:
>>> subs.at((0, 0, 20, 0)).shift(seconds=2)
>>> subs.at(seconds=20).shift(seconds=2)
"""
time = timestamp or kwargs
return self.slice(starts_before=time, ends_after=time)
def shift(self, *args, **kwargs):
"""shift(hours, minutes, seconds, milliseconds, ratio)
Shift `start` and `end` attributes of each items of file either by
applying a ratio or by adding an offset.
`ratio` should be either an int or a float.
Example to convert subtitles from 23.9 fps to 25 fps:
>>> subs.shift(ratio=25/23.9)
All "time" arguments are optional and have a default value of 0.
        Example to delay all subs by 2 and a half seconds
>>> subs.shift(seconds=2, milliseconds=500)
"""
for item in self:
item.shift(*args, **kwargs)
def clean_indexes(self):
"""
clean_indexes()
Sort subs and reset their index attribute. Should be called after
destructive operations like split or such.
"""
self.sort()
for index, item in enumerate(self):
item.index = index + 1
@property
def text(self):
return '\n'.join(i.text for i in self)
@classmethod
def open(cls, path='', encoding=None, error_handling=ERROR_PASS):
"""
open([path, [encoding]])
        If you do not provide an encoding, it is detected from the file's
        byte order mark when present, and defaults to utf-8 otherwise.
"""
new_file = cls(path=path, encoding=encoding)
source_file = cls._open_unicode_file(path, claimed_encoding=encoding)
new_file.read(source_file, error_handling=error_handling)
source_file.close()
return new_file
@classmethod
def from_string(cls, source, **kwargs):
"""
from_string(source, **kwargs) -> SubRipFile
`source` -> a unicode instance or at least a str instance encoded with
`sys.getdefaultencoding()`
"""
error_handling = kwargs.pop('error_handling', None)
new_file = cls(**kwargs)
new_file.read(source.splitlines(True), error_handling=error_handling)
return new_file
def read(self, source_file, error_handling=ERROR_PASS):
"""
read(source_file, [error_handling])
        This method parses subtitles contained in `source_file` and appends them
        to the current instance.
        `source_file` -> Any iterable that yields unicode strings, like a file
        opened with `codecs.open()` or an array of unicode.
"""
self.eol = self._guess_eol(source_file)
self.extend(self.stream(source_file, error_handling=error_handling))
return self
@classmethod
def stream(cls, source_file, error_handling=ERROR_PASS):
"""
stream(source_file, [error_handling])
        This method yields SubRipItem instances as soon as they have been parsed
        without storing them. It is a kind of SAX parser for .srt files.
        `source_file` -> Any iterable that yields unicode strings, like a file
        opened with `codecs.open()` or an array of unicode.
Example:
>>> import pysrt
>>> import codecs
>>> file = codecs.open('movie.srt', encoding='utf-8')
>>> for sub in pysrt.stream(file):
... sub.text += "\nHello !"
... print unicode(sub)
"""
string_buffer = []
for index, line in enumerate(chain(source_file, '\n')):
if line.strip():
string_buffer.append(line)
else:
source = string_buffer
string_buffer = []
if source and all(source):
try:
yield SubRipItem.from_lines(source)
except Error as error:
error.args += (''.join(source), )
cls._handle_error(error, error_handling, index)
def save(self, path=None, encoding=None, eol=None):
"""
save([path][, encoding][, eol])
Use initial path if no other provided.
Use initial encoding if no other provided.
Use initial eol if no other provided.
"""
path = path or self.path
encoding = encoding or self.encoding
save_file = codecs.open(path, 'w+', encoding=encoding)
self.write_into(save_file, eol=eol)
save_file.close()
def write_into(self, output_file, eol=None):
"""
write_into(output_file [, eol])
Serialize current state into `output_file`.
`output_file` -> Any instance that respond to `write()`, typically a
file object
"""
output_eol = eol or self.eol
for item in self:
string_repr = str(item)
if output_eol != '\n':
string_repr = string_repr.replace('\n', output_eol)
output_file.write(string_repr)
# Only add trailing eol if it's not already present.
# It was kept in the SubRipItem's text before but it really
# belongs here. Existing applications might give us subtitles
# which already contain a trailing eol though.
if not string_repr.endswith(2 * output_eol):
output_file.write(output_eol)
@classmethod
def _guess_eol(cls, string_iterable):
first_line = cls._get_first_line(string_iterable)
for eol in ('\r\n', '\r', '\n'):
if first_line.endswith(eol):
return eol
return os.linesep
@classmethod
def _get_first_line(cls, string_iterable):
if hasattr(string_iterable, 'tell'):
previous_position = string_iterable.tell()
try:
first_line = next(iter(string_iterable))
except StopIteration:
return ''
if hasattr(string_iterable, 'seek'):
string_iterable.seek(previous_position)
return first_line
@classmethod
def _detect_encoding(cls, path):
file_descriptor = open(path, 'rb')
first_chars = file_descriptor.read(BIGGER_BOM)
file_descriptor.close()
for bom, encoding in BOMS:
if first_chars.startswith(bom):
return encoding
# TODO: maybe a chardet integration
return cls.DEFAULT_ENCODING
@classmethod
def _open_unicode_file(cls, path, claimed_encoding=None):
encoding = claimed_encoding or cls._detect_encoding(path)
source_file = codecs.open(path, 'rU', encoding=encoding)
# get rid of BOM if any
possible_bom = CODECS_BOMS.get(encoding, None)
if possible_bom:
file_bom = source_file.read(len(possible_bom))
if not file_bom == possible_bom:
                source_file.seek(0)  # no BOM present: rewind to the start
return source_file
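    # Illustrative flow (file name hypothetical): opening 'movie.srt' that
    # begins with codecs.BOM_UTF16_LE makes _detect_encoding return
    # 'utf_16_le', and _open_unicode_file then consumes the BOM so parsing
    # starts at the first subtitle block.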
@classmethod
def _handle_error(cls, error, error_handling, index):
if error_handling == cls.ERROR_RAISE:
error.args = (index, ) + error.args
raise error
if error_handling == cls.ERROR_LOG:
name = type(error).__name__
sys.stderr.write('PySRT-%s(line %s): \n' % (name, index))
sys.stderr.write(error.args[0].encode('ascii', 'replace'))
sys.stderr.write('\n')
|
gpl-3.0
|
briancurtin/libcloud
|
libcloud/test/compute/test_rimuhosting.py
|
46
|
4370
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2009 RedRata Ltd
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.rimuhosting import RimuHostingNodeDriver
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
class RimuHostingTest(unittest.TestCase, TestCaseMixin):
def setUp(self):
RimuHostingNodeDriver.connectionCls.conn_classes = (None,
RimuHostingMockHttp)
self.driver = RimuHostingNodeDriver('foo')
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 1)
node = nodes[0]
self.assertEqual(node.public_ips[0], "1.2.3.4")
self.assertEqual(node.public_ips[1], "1.2.3.5")
self.assertEqual(node.extra['order_oid'], 88833465)
self.assertEqual(node.id, "order-88833465-api-ivan-net-nz")
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 1)
size = sizes[0]
self.assertEqual(size.ram, 950)
self.assertEqual(size.disk, 20)
self.assertEqual(size.bandwidth, 75)
self.assertEqual(size.price, 32.54)
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 6)
image = images[0]
self.assertEqual(image.name, "Debian 5.0 (aka Lenny, RimuHosting"
" recommended distro)")
self.assertEqual(image.id, "lenny")
def test_reboot_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.reboot_node(node)
def test_destroy_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.destroy_node(node)
def test_create_node(self):
# Raises exception on failure
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
self.driver.create_node(name="api.ivan.net.nz", image=image, size=size)
class RimuHostingMockHttp(MockHttp):
fixtures = ComputeFileFixtures('rimuhosting')
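    # Editorial note on the fixture pattern: MockHttp routes each request to a
    # method named after the URL path with non-alphanumeric characters replaced
    # by underscores, so GET /r/orders is handled by _r_orders below, and each
    # handler returns canned JSON from the fixtures directory.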
def _r_orders(self, method, url, body, headers):
body = self.fixtures.load('r_orders.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_pricing_plans(self, method, url, body, headers):
body = self.fixtures.load('r_pricing_plans.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_distributions(self, method, url, body, headers):
body = self.fixtures.load('r_distributions.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_new_vps(self, method, url, body, headers):
body = self.fixtures.load('r_orders_new_vps.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_order_88833465_api_ivan_net_nz_vps(self, method, url, body, headers):
body = self.fixtures.load(
'r_orders_order_88833465_api_ivan_net_nz_vps.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_order_88833465_api_ivan_net_nz_vps_running_state(
self, method,
url, body,
headers):
body = self.fixtures.load(
'r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
apache-2.0
|
aeron15/ruffus
|
ruffus/test/test_regex_error_messages.py
|
5
|
18879
|
#!/usr/bin/env python
from __future__ import print_function
"""
test_regex_error_messages.py
    test error messages from mismatched or misspelt regex() and suffix() substitutions
Includes code from python.unittest with the following copyright notice:
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
#sub-1s resolution in system?
one_second_per_job = None
parallelism = 2
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = __import__ (ruffus_name)
for attr in "pipeline_run", "pipeline_printout", "suffix", "transform", "split", "merge", "dbdict", "follows", "originate", "Pipeline", "regex":
globals()[attr] = getattr (ruffus, attr)
RUFFUS_HISTORY_FILE = ruffus.ruffus_utility.RUFFUS_HISTORY_FILE
fatal_error_input_file_does_not_match = ruffus.ruffus_exceptions.fatal_error_input_file_does_not_match
RethrownJobError = ruffus.ruffus_exceptions.RethrownJobError
import re
import unittest
import shutil
try:
from StringIO import StringIO
except:
from io import StringIO
#___________________________________________________________________________
#
# generate_initial_files1
#___________________________________________________________________________
@originate([tempdir + prefix + "_name.tmp1" for prefix in "abcdefghi"])
def generate_initial_files1(out_name):
with open(out_name, 'w') as outfile:
pass
#___________________________________________________________________________
#
# test_regex_task
#___________________________________________________________________________
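# An illustrative trace of the substitution below: the input
# "<tempdir>/a_name.tmp1" matches the regex (only prefixes a-d can), giving
# the output "<tempdir>/a_name.tmp2" and the extras ("a", "a", ".tmp1").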
@transform(
generate_initial_files1,
regex("(.*)/(?P<PREFIX>[abcd])(_name)(.tmp1)"),
r"\1/\g<PREFIX>\3.tmp2",# output file
r"\2", # extra: prefix = \2
r"\g<PREFIX>", # extra: prefix = \2
r"\4") # extra: extension
def test_regex_task(infiles, outfile,
prefix1,
prefix2,
extension):
with open(outfile, "w") as p:
pass
if prefix1 != prefix2:
raise Exception("Expecting %s == %s" % (prefix1, prefix2))
#___________________________________________________________________________
#
# test_regex_unmatched_task
#___________________________________________________________________________
@transform(
generate_initial_files1,
regex("(.*)/(?P<PREFIX>[abcd])(_name)(.xxx)"),
r"\1/\g<PREFIXA>\3.tmp2",# output file
r"\2", # extra: prefix = \2
r"\g<PREFIX>", # extra: prefix = \2
r"\4") # extra: extension
def test_regex_unmatched_task(infiles, outfile,
prefix1,
prefix2,
extension):
raise Exception("Should blow up first")
#___________________________________________________________________________
#
# test_suffix_task
#___________________________________________________________________________
@transform(
generate_initial_files1,
suffix(".tmp1"),
r".tmp2", # output file
r"\1") # extra: basename
def test_suffix_task(infile, outfile,
basename):
with open (outfile, "w") as f: pass
#___________________________________________________________________________
#
# test_suffix_unmatched_task
#___________________________________________________________________________
@transform(
generate_initial_files1,
suffix(".tmp1"),
r".tmp2", # output file
r"\2") # extra: unknown
def test_suffix_unmatched_task(infiles, outfile, unknown):
raise Exception("Should blow up first")
#___________________________________________________________________________
#
# test_suffix_unmatched_task
#___________________________________________________________________________
@transform(
generate_initial_files1,
suffix(".tmp2"),
r".tmp2") # output file
def test_suffix_unmatched_task2(infiles, outfile):
raise Exception("Should blow up first")
#___________________________________________________________________________
#
# test_product_misspelt_capture_error_task
#___________________________________________________________________________
@transform(
generate_initial_files1,
regex("(.*)/(?P<PREFIX>[abcd])(_name)(.tmp)"),
r"\1/\g<PREFIXA>\3.tmp2",# output file
r"\2", # extra: prefix = \2
r"\g<PREFIX>", # extra: prefix = \2
r"\4") # extra: extension
def test_regex_misspelt_capture_error_task(infiles, outfile,
prefix1,
prefix2,
extension):
raise Exception("Should blow up first")
#___________________________________________________________________________
#
# test_regex_misspelt_capture2_error_task
#___________________________________________________________________________
@transform(
generate_initial_files1,
regex("(.*)/(?P<PREFIX>[abcd])(_name)(.tmp)"),
r"\1/\g<PREFIX>\3.tmp2",# output file
r"\2", # extra: prefix = \2
r"\g<PREFIXA>", # extra: prefix = \2
r"\4") # extra: extension
def test_regex_misspelt_capture2_error_task(infiles, outfile,
prefix1,
prefix2,
extension):
raise Exception("Should blow up first")
#___________________________________________________________________________
#
# test_regex_out_of_range_regex_reference_error_task
#___________________________________________________________________________
@transform(
generate_initial_files1,
regex("(.*)/(?P<PREFIX>[abcd])(_name)(.tmp)"),
r"\1/\g<PREFIX>\5.tmp2",# output file
r"\2", # extra: prefix = \2
r"\g<PREFIX>", # extra: prefix = \2
r"\4") # extra: extension
def test_regex_out_of_range_regex_reference_error_task(infiles, outfile,
prefix1,
prefix2,
extension):
raise Exception("Should blow up first")
def cleanup_tmpdir():
os.system('rm -f %s %s' % (os.path.join(tempdir, '*'), RUFFUS_HISTORY_FILE))
class _AssertRaisesContext_27(object):
"""A context manager used to implement TestCase.assertRaises* methods.
Taken from python unittest2.7
"""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class Test_regex_error_messages(unittest.TestCase):
def setUp(self):
try:
os.mkdir(tempdir)
except OSError:
pass
if sys.hexversion < 0x03000000:
self.assertRaisesRegex = self.assertRaisesRegexp27
if sys.hexversion < 0x02700000:
self.assertIn = self.my_assertIn
def my_assertIn (self, test_string, full_string):
self.assertTrue(test_string in full_string)
#
def assertRaisesRegexp27(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext_27(expected_exception, self, expected_regexp)
if callable_obj is None:
return context
with context:
callable_obj(*args, **kwargs)
#___________________________________________________________________________
#
# test regex() pipeline_printout and pipeline_run
#___________________________________________________________________________
def test_regex_printout(self):
cleanup_tmpdir()
s = StringIO()
pipeline_printout(s, [test_regex_task], verbose=5, wrap_width = 10000, pipeline= "main")
self.assertTrue(re.search('Missing files.*\[{tempdir}a_name.tmp1, {tempdir}a_name.tmp2'.format(tempdir=tempdir), s.getvalue(), re.DOTALL))
def test_regex_run(self):
"""Run transform(...,regex()...)"""
# output is up to date, but function body changed (e.g., source different)
cleanup_tmpdir()
pipeline_run([test_regex_task], verbose=0, multiprocess = parallelism, one_second_per_job = one_second_per_job, pipeline= "main")
#___________________________________________________________________________
#
    # test regex() with unmatched input files: pipeline_printout and pipeline_run
#___________________________________________________________________________
def test_regex_unmatched_printout(self):
cleanup_tmpdir()
s = StringIO()
pipeline_printout(s, [test_regex_unmatched_task], verbose=5, wrap_width = 10000, pipeline= "main")
self.assertIn("Warning: Input substitution failed:", s.getvalue())
self.assertIn("File '{tempdir}a_name.tmp1' does not match regex".format(tempdir=tempdir), s.getvalue())
def test_regex_unmatched_run(self):
"""Run transform(...,regex()...)"""
# output is up to date, but function body changed (e.g., source different)
cleanup_tmpdir()
pipeline_run([test_regex_unmatched_task], verbose=0, multiprocess = parallelism, one_second_per_job = one_second_per_job, pipeline= "main")
#___________________________________________________________________________
#
# test suffix() pipeline_printout and pipeline_run
#___________________________________________________________________________
def test_suffix_printout(self):
cleanup_tmpdir()
s = StringIO()
pipeline_printout(s, [test_suffix_task], verbose=5, wrap_width = 10000, pipeline= "main")
self.assertTrue(re.search('Missing files.*\[{tempdir}a_name.tmp1, {tempdir}a_name.tmp2'.format(tempdir=tempdir), s.getvalue(), re.DOTALL))
def test_suffix_run(self):
"""Run transform(...,suffix()...)"""
# output is up to date, but function body changed (e.g., source different)
cleanup_tmpdir()
pipeline_run([test_suffix_task], verbose=0, multiprocess = parallelism, one_second_per_job = one_second_per_job, pipeline= "main")
#___________________________________________________________________________
#
    # test suffix() with an invalid replacement pattern: pipeline_printout and pipeline_run
#___________________________________________________________________________
def test_suffix_unmatched(self):
cleanup_tmpdir()
s = StringIO()
self.assertRaisesRegex(fatal_error_input_file_does_not_match,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*invalid group reference",
pipeline_printout,
s, [test_suffix_unmatched_task],
verbose = 3)
self.assertRaisesRegex(RethrownJobError,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*invalid group reference",
pipeline_run,
[test_suffix_unmatched_task], verbose = 0, multiprocess = parallelism)
#___________________________________________________________________________
#
    # test suffix() with unmatched input files: pipeline_printout and pipeline_run
#___________________________________________________________________________
def test_suffix_unmatched_printout2(self):
cleanup_tmpdir()
s = StringIO()
pipeline_printout(s, [test_suffix_unmatched_task2], verbose=5, wrap_width = 10000, pipeline= "main")
self.assertIn("Warning: Input substitution failed:", s.getvalue())
self.assertIn("File '{tempdir}a_name.tmp1' does not match suffix".format(tempdir=tempdir), s.getvalue())
def test_suffix_unmatched_run2(self):
"""Run transform(...,suffix()...)"""
# output is up to date, but function body changed (e.g., source different)
cleanup_tmpdir()
pipeline_run([test_suffix_unmatched_task2], verbose=0, multiprocess = parallelism, one_second_per_job = one_second_per_job, pipeline= "main")
#___________________________________________________________________________
#
# test regex() errors: func pipeline_printout
#___________________________________________________________________________
def test_regex_misspelt_capture_error(self):
cleanup_tmpdir()
s = StringIO()
self.assertRaisesRegex(fatal_error_input_file_does_not_match,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*unknown group name",
pipeline_printout,
s, [test_regex_misspelt_capture_error_task],
verbose = 3)
self.assertRaisesRegex(RethrownJobError,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*unknown group name",
pipeline_run,
[test_regex_misspelt_capture_error_task], verbose = 0)
#___________________________________________________________________________
#
# test regex() errors: func pipeline_printout
#___________________________________________________________________________
def test_regex_misspelt_capture2_error(self):
cleanup_tmpdir()
s = StringIO()
self.assertRaisesRegex(fatal_error_input_file_does_not_match,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*unknown group name",
pipeline_printout,
s, [test_regex_misspelt_capture2_error_task],
verbose = 3)
self.assertRaisesRegex(RethrownJobError,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*unknown group name",
pipeline_run,
[test_regex_misspelt_capture2_error_task], verbose = 0, multiprocess = parallelism)
#___________________________________________________________________________
#
# test regex() errors: func pipeline_printout
#___________________________________________________________________________
def test_regex_out_of_range_regex_reference_error_printout(self):
cleanup_tmpdir()
s = StringIO()
self.assertRaisesRegex(fatal_error_input_file_does_not_match,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*invalid group reference",
pipeline_printout,
s, [test_regex_out_of_range_regex_reference_error_task],
verbose = 3)
self.assertRaisesRegex(RethrownJobError,
"File '.*?' does not match regex\('.*?'\) and pattern '.*?':\n.*invalid group reference",
pipeline_run,
[test_regex_out_of_range_regex_reference_error_task], verbose = 0, multiprocess = parallelism)
#___________________________________________________________________________
#
# cleanup
#___________________________________________________________________________
def tearDown(self):
shutil.rmtree(tempdir)
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
#pipeline_printout(sys.stdout, [test_product_task], verbose = 3, pipeline= "main")
parallelism = 1
suite = unittest.TestLoader().loadTestsFromTestCase(Test_regex_error_messages)
unittest.TextTestRunner(verbosity=1).run(suite)
parallelism = 2
suite = unittest.TestLoader().loadTestsFromTestCase(Test_regex_error_messages)
unittest.TextTestRunner(verbosity=1).run(suite)
#unittest.main()
|
mit
|
EmanueleCannizzaro/scons
|
test/Alias/action.py
|
1
|
4351
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Test that Aliases with actions work.
"""
__revision__ = "test/Alias/action.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
def cat(target, source, env):
target = str(target[0])
f = open(target, "wb")
for src in source:
f.write(open(str(src), "rb").read())
f.close()
def foo(target, source, env):
target = list(map(str, target))
source = list(map(str, source))
open('foo', 'wb').write("foo(%s, %s)\\n" % (target, source))
def bar(target, source, env):
target = list(map(str, target))
source = list(map(str, source))
open('bar', 'wb').write("bar(%s, %s)\\n" % (target, source))
env = Environment(BUILDERS = {'Cat':Builder(action=cat)})
env.Alias(target = ['build-f1'], source = 'f1.out', action = foo)
f1 = env.Cat('f1.out', 'f1.in')
f2 = env.Cat('f2.out', 'f2.in')
f3 = env.Cat('f3.out', 'f3.in')
f4 = env.Cat('f4.out', 'f4.in')
f5 = env.Cat('f5.out', 'f5.in')
f6 = env.Cat('f6.out', 'f6.in')
env.Alias('build-all', [f1, f2, f3], foo)
env.Alias('build-add1', f3, foo)
env.Alias('build-add1', f2)
env.Alias('build-add2a', f4)
env.Alias('build-add2b', f5)
env.Alias(['build-add2a', 'build-add2b'], action=foo)
env.Alias('build-add3', f6)
env.Alias('build-add3', action=foo)
env.Alias('build-add3', action=bar)
""")
test.write('f1.in', "f1.in 1\n")
test.write('f2.in', "f2.in 1\n")
test.write('f3.in', "f3.in 1\n")
test.write('f4.in', "f4.in 1\n")
test.write('f5.in', "f5.in 1\n")
test.write('f6.in', "f6.in 1\n")
test.run(arguments = 'build-f1')
test.must_match('f1.out', "f1.in 1\n")
test.must_match('foo', "foo(['build-f1'], ['f1.out'])\n")
test.up_to_date(arguments = 'build-f1')
test.write('f1.in', "f1.in 2\n")
test.unlink('foo')
test.run(arguments = 'build-f1')
test.must_match('f1.out', "f1.in 2\n")
test.must_match('foo', "foo(['build-f1'], ['f1.out'])\n")
test.run(arguments = 'build-all')
test.must_match('f1.out', "f1.in 2\n")
test.must_match('f2.out', "f2.in 1\n")
test.must_match('f3.out', "f3.in 1\n")
test.must_match('foo', "foo(['build-all'], ['f1.out', 'f2.out', 'f3.out'])\n")
test.up_to_date(arguments = 'build-all')
test.up_to_date(arguments = 'build-add1')
test.write('f1.in', "f1.in 3\n")
test.write('f3.in', "f3.in 2\n")
test.unlink('foo')
test.run(arguments = 'build-add1')
test.must_match('f1.out', "f1.in 2\n")
test.must_match('f2.out', "f2.in 1\n")
test.must_match('f3.out', "f3.in 2\n")
test.must_match('foo', "foo(['build-add1'], ['f3.out', 'f2.out'])\n")
test.up_to_date(arguments = 'build-add1')
test.run(arguments = 'build-add2a')
test.must_match('f4.out', "f4.in 1\n")
test.must_not_exist('f5.out')
test.must_match('foo', "foo(['build-add2a'], ['f4.out'])\n")
test.run(arguments = 'build-add2b')
test.must_match('f5.out', "f5.in 1\n")
test.must_match('foo', "foo(['build-add2b'], ['f5.out'])\n")
test.run(arguments = 'build-add3')
test.must_match('f6.out', "f6.in 1\n")
test.must_match('foo', "foo(['build-add3'], ['f6.out'])\n")
test.must_match('bar', "bar(['build-add3'], ['f6.out'])\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
andela-earinde/bellatrix-py
|
app/js/lib/lib/modules/locale.py
|
79
|
99412
|
""" Locale support.
The module provides low-level access to the C lib's locale APIs
and adds high level number formatting APIs as well as a locale
aliasing engine to complement these.
The aliasing engine includes support for many commonly used locale
names and maps them to values suitable for passing to the C lib's
setlocale() function. It also includes default encodings for all
supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import operator
import functools
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
# will not exist. Fake one.
class _unicode(object):
pass
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
try:
from _locale import *
except ImportError:
# Locale emulation
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
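    # In the 'C' defaults above, 127 (== CHAR_MAX) is the C convention for
    # "this value is not available in the current locale".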
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error, '_locale emulation only supports "C" locale'
return 'C'
def strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return cmp(a,b)
def strxfrm(s):
""" strxfrm(string) -> string.
Returns a string that behaves for cmp locale-aware.
"""
return s
_localeconv = localeconv
# With this dict, you can override some items of localeconv's return value.
# This is useful for testing purposes.
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
d = _localeconv()
if _override_localeconv:
d.update(_override_localeconv)
return d
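# Illustrative use of the override hook above (intended for tests only):
# setting, e.g.,
#     _override_localeconv = {'thousands_sep': '.', 'decimal_point': ','}
# makes the formatting helpers below behave as if the active locale grouped
# digits with '.' and used ',' as its decimal point, without any setlocale().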
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
# Iterate over grouping intervals
def _grouping_intervals(grouping):
last_interval = None
for interval in grouping:
# if grouping is -1, we are done
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
if last_interval is None:
raise ValueError("invalid grouping")
while True:
yield last_interval
yield interval
last_interval = interval
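# Illustrative expansions of the generator above:
#   list(_grouping_intervals([3, 3, CHAR_MAX])) -> [3, 3]
#   _grouping_intervals([3, 0]) repeats the last interval forever: 3, 3, 3, ...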
#perform the grouping from right to left
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
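# Example (illustrative; assumes grouping [3, 0] and thousands_sep ','):
#   _group('123456789') -> ('123,456,789', 2)
# where 2 is the number of separator characters inserted, which the caller
# later removes from the field padding via _strip_padding().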
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
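# e.g. _strip_padding('  1,234', 2) -> '1,234': the two separator characters
# added by grouping are compensated for by dropping two characters of padding.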
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
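# _percent_re matches one %-specifier at a time: an optional '%(key)' mapping
# key, optional modifiers (flags, width, precision, '*'), and a conversion
# type, e.g. '%(num)d', '%10.2f' or '%*d'.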
def format(percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
    # format() only accepts strings containing exactly one %-specifier;
    # the check below enforces that.
match = _percent_re.match(percent)
    if not match or len(match.group()) != len(percent):
raise ValueError(("format() must be given exactly one %%char "
"format specifier, %s not valid") % repr(percent))
return _format(percent, value, grouping, monetary, *additional)
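# Examples (illustrative; output assumes an en_US-style locale is active):
#   format("%d", 1234567, grouping=True)  -> '1,234,567'
#   format("%.2f", 1234.5, grouping=True) -> '1,234.50'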
def _format(percent, value, grouping=False, monetary=False, *additional):
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif percent[-1] in 'diu':
seps = 0
if grouping:
formatted, seps = _group(formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if operator.isMappingType(val):
new_val = []
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
new_val.append(format(perc.group(), val, grouping))
else:
if not isinstance(val, tuple):
val = (val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(perc.group(),
val[i],
grouping,
False,
*val[i+1:i+1+starcount]))
i += (1 + starcount)
val = tuple(new_val)
return new_f % val
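# Example (illustrative; assumes a locale that groups with ','):
#   format_string("%d files, %.1f%% done", (1234, 99.5), grouping=True)
#   -> '1,234 files, 99.5% done'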
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
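# Example (illustrative; assumes an active en_US locale):
#   currency(1234.5, grouping=True) -> '$1,234.50'
# With international=True the ISO 4217 symbol from int_curr_symbol is used
# instead, e.g. 'USD 1,234.50'.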
def str(val):
"""Convert float to integer, taking the locale into account."""
return format("%.12g", val)
def atof(string, func=float):
"Parses a string as a float according to the locale settings."
#First, get rid of the grouping
ts = localeconv()['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = localeconv()['decimal_point']
if dd:
string = string.replace(dd, '.')
#finally, parse the string
return func(string)
def atoi(str):
"Converts a string to an integer according to the locale settings."
return atof(str, int)
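# Examples (illustrative; assume an active de_DE locale, where '.' groups
# thousands and ',' is the decimal point):
#   atof('1.234,56') -> 1234.56
#   atoi('1.234')    -> 1234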
def _test():
setlocale(LC_ALL, "")
#do grouping
s1 = format("%d", 123456789,1)
print s1, "is", atoi(s1)
#standard formatting
s1 = str(3.14)
print s1, "is", atof(s1)
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, [email protected]
# Various tweaks by Fredrik Lundh <[email protected]>
# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale
# Avoid relying on the locale-dependent .lower() method
# (see issue #1813).
_ascii_lower_map = ''.join(
chr(x + 32 if x >= ord('A') and x <= ord('Z') else x)
for x in range(256)
)
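# The table keeps lowercasing ASCII-only: with a Turkish locale active,
# str.lower() could map 'I' to a dotless i and corrupt locale names such as
# 'TR_TR.ISO8859-9' during the alias lookups below.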
def _replace_encoding(code, encoding):
if '.' in code:
langname = code[:code.index('.')]
else:
langname = code
# Convert the encoding to a C lib compatible encoding string
norm_encoding = encodings.normalize_encoding(encoding)
#print('norm encoding: %r' % norm_encoding)
norm_encoding = encodings.aliases.aliases.get(norm_encoding,
norm_encoding)
#print('aliased encoding: %r' % norm_encoding)
encoding = locale_encoding_alias.get(norm_encoding,
norm_encoding)
#print('found encoding %r' % encoding)
return langname + '.' + encoding
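# Example (illustrative): _replace_encoding('de_DE.ISO8859-1', 'latin_1')
# returns 'de_DE.ISO8859-1': the Python codec name 'latin_1' is mapped to
# the C lib encoding name 'ISO8859-1' via the locale_encoding_alias table
# defined below.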
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
# Normalize the locale name and extract the encoding and modifier
if isinstance(localename, _unicode):
localename = localename.encode('ascii')
code = localename.translate(_ascii_lower_map)
if ':' in code:
# ':' is sometimes used as encoding delimiter.
code = code.replace(':', '.')
if '@' in code:
code, modifier = code.split('@', 1)
else:
modifier = ''
if '.' in code:
langname, encoding = code.split('.')[:2]
else:
langname = code
encoding = ''
# First lookup: fullname (possibly with encoding and modifier)
lang_enc = langname
if encoding:
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lang_enc += '.' + norm_encoding
lookup_name = lang_enc
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
#print('first lookup failed')
if modifier:
# Second try: fullname without modifier (possibly with encoding)
code = locale_alias.get(lang_enc, None)
if code is not None:
#print('lookup without modifier succeeded')
if '@' not in code:
return code + '@' + modifier
if code.split('@', 1)[1].translate(_ascii_lower_map) == modifier:
return code
#print('second lookup failed')
if encoding:
# Third try: langname (without encoding, possibly with modifier)
lookup_name = langname
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
#print('lookup without encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding)
code, modifier = code.split('@', 1)
return _replace_encoding(code, encoding) + '@' + modifier
if modifier:
# Fourth try: langname (without encoding and modifier)
code = locale_alias.get(langname, None)
if code is not None:
#print('lookup without modifier and encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding) + '@' + modifier
code, defmod = code.split('@', 1)
if defmod.translate(_ascii_lower_map) == modifier:
return _replace_encoding(code, encoding) + '@' + defmod
return localename
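# Examples (illustrative, based on the alias tables below):
#   normalize('EN_us')      -> 'en_US.ISO8859-1'
#   normalize('de_DE@euro') -> 'de_DE.ISO8859-15'
#   normalize('klingon')    -> 'klingon'   (unknown names pass through)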
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
raise ValueError, 'unknown locale: %s' % localename
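# Examples (illustrative):
#   _parse_localename('de_DE.UTF-8') -> ('de_DE', 'UTF-8')
#   _parse_localename('C')           -> (None, None)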
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
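# e.g. _build_localename(('en_US', 'UTF-8')) -> 'en_US.UTF-8'
#      _build_localename((None, None))       -> 'C'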
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
# check if it's supported by the _locale module
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
# make sure the code/encoding values are valid
if sys.platform == "win32" and code and code[:2] == "0x":
# map windows language identifier to language name
code = windows_locale.get(int(code, 0))
# ...add other platform-specific processing here, if
# necessary...
return code, encoding
# fall back on POSIX behaviour
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable,None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
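# Example (illustrative): on a POSIX system with LANG=fr_FR.UTF-8 and no
# LC_ALL or LC_CTYPE set, getdefaultlocale() returns ('fr_FR', 'UTF-8').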
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError, 'category LC_ALL is not supported'
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, an iterable of two strings (language code and encoding),
or None.
Iterables are converted to strings using the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and type(locale) is not type(""):
# convert to string
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
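# Example (illustrative): these two calls are equivalent, because the tuple
# form is converted with _build_localename() and normalize() first:
#   setlocale(LC_ALL, ('de_DE', 'UTF-8'))
#   setlocale(LC_ALL, 'de_DE.UTF-8')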
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform.startswith("win"):
# On Win32, this will return the ANSI code page
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
import _locale
return _locale._getdefaultlocale()[1]
else:
# On Unix, if CODESET is available, use that.
try:
CODESET
except NameError:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
return getdefaultlocale()[1]
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
try:
setlocale(LC_CTYPE, "")
except Error:
pass
result = nl_langinfo(CODESET)
setlocale(LC_CTYPE, oldloc)
return result
else:
return nl_langinfo(CODESET)
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
locale_encoding_alias = {
# Mappings for non-standard encoding names used in locale names
'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
# Mappings from Python codec names to C lib encoding names
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_16': 'ISO8859-16',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF-8',
'koi8_r': 'KOI8-R',
'koi8_u': 'KOI8-U',
# XXX This list is still incomplete. If you know more
# mappings, please file a bug report. Thanks.
}
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this tables
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
# MAL 2008-05-30:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.5
# and older):
#
# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
#
# AP 2010-04-12:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.6.5
# and older):
#
# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
#
# SS 2013-12-20:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.7.6
# and older):
#
# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8'
# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
#
# SS 2014-10-01:
# Updated alias mapping with glibc 2.19 supported locales.
locale_alias = {
'a3': 'az_AZ.KOI8-C',
'a3_az': 'az_AZ.KOI8-C',
'a3_az.koi8c': 'az_AZ.KOI8-C',
'a3_az.koic': 'az_AZ.KOI8-C',
'aa_dj': 'aa_DJ.ISO8859-1',
'aa_er': 'aa_ER.UTF-8',
'aa_et': 'aa_ET.UTF-8',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'af_za.iso88591': 'af_ZA.ISO8859-1',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'american.iso88591': 'en_US.ISO8859-1',
'an_es': 'an_ES.ISO8859-15',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_aa.iso88596': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_ae.iso88596': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_bh.iso88596': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_dz.iso88596': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_eg.iso88596': 'ar_EG.ISO8859-6',
'ar_in': 'ar_IN.UTF-8',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_iq.iso88596': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_jo.iso88596': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_kw.iso88596': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_lb.iso88596': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ly.iso88596': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_ma.iso88596': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_om.iso88596': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_qa.iso88596': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sa.iso88596': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_sd.iso88596': 'ar_SD.ISO8859-6',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_sy.iso88596': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_tn.iso88596': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'ar_ye.iso88596': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'arabic.iso88596': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
'as_in': 'as_IN.UTF-8',
'ast_es': 'ast_ES.ISO8859-15',
'ayc_pe': 'ayc_PE.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_bg.utf8': 'bg_BG.UTF-8',
'be_by': 'be_BY.CP1251',
'be_by.cp1251': 'be_BY.CP1251',
'be_by.microsoftcp1251': 'be_BY.CP1251',
'be_by.utf8@latin': 'be_BY.UTF-8@latin',
'be_by@latin': 'be_BY.UTF-8@latin',
'bem_zm': 'bem_ZM.UTF-8',
'ber_dz': 'ber_DZ.UTF-8',
'ber_ma': 'ber_MA.UTF-8',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bg_bg.cp1251': 'bg_BG.CP1251',
'bg_bg.iso88595': 'bg_BG.ISO8859-5',
'bg_bg.koi8r': 'bg_BG.KOI8-R',
'bg_bg.microsoftcp1251': 'bg_BG.CP1251',
'bho_in': 'bho_IN.UTF-8',
'bn_bd': 'bn_BD.UTF-8',
'bn_in': 'bn_IN.UTF-8',
'bo_cn': 'bo_CN.UTF-8',
'bo_in': 'bo_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'br_fr.iso88591': 'br_FR.ISO8859-1',
'br_fr.iso885914': 'br_FR.ISO8859-14',
'br_fr.iso885915': 'br_FR.ISO8859-15',
'br_fr.iso885915@euro': 'br_FR.ISO8859-15',
'br_fr.utf8@euro': 'br_FR.UTF-8',
'br_fr@euro': 'br_FR.ISO8859-15',
'brx_in': 'brx_IN.UTF-8',
'bs': 'bs_BA.ISO8859-2',
'bs_ba': 'bs_BA.ISO8859-2',
'bs_ba.iso88592': 'bs_BA.ISO8859-2',
'bulgarian': 'bg_BG.CP1251',
'byn_er': 'byn_ER.UTF-8',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c-french.iso88591': 'fr_CA.ISO8859-1',
'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c.utf8': 'en_US.UTF-8',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_ad.iso88591': 'ca_AD.ISO8859-1',
'ca_ad.iso885915': 'ca_AD.ISO8859-15',
'ca_ad.iso885915@euro': 'ca_AD.ISO8859-15',
'ca_ad.utf8@euro': 'ca_AD.UTF-8',
'ca_ad@euro': 'ca_AD.ISO8859-15',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es.iso88591': 'ca_ES.ISO8859-1',
'ca_es.iso885915': 'ca_ES.ISO8859-15',
'ca_es.iso885915@euro': 'ca_ES.ISO8859-15',
'ca_es.utf8@euro': 'ca_ES.UTF-8',
'ca_es@valencia': 'ca_ES.ISO8859-15@valencia',
'ca_es@euro': 'ca_ES.ISO8859-15',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_fr.iso88591': 'ca_FR.ISO8859-1',
'ca_fr.iso885915': 'ca_FR.ISO8859-15',
'ca_fr.iso885915@euro': 'ca_FR.ISO8859-15',
'ca_fr.utf8@euro': 'ca_FR.UTF-8',
'ca_fr@euro': 'ca_FR.ISO8859-15',
'ca_it': 'ca_IT.ISO8859-1',
'ca_it.iso88591': 'ca_IT.ISO8859-1',
'ca_it.iso885915': 'ca_IT.ISO8859-15',
'ca_it.iso885915@euro': 'ca_IT.ISO8859-15',
'ca_it.utf8@euro': 'ca_IT.UTF-8',
'ca_it@euro': 'ca_IT.ISO8859-15',
'catalan': 'ca_ES.ISO8859-1',
'cextend': 'en_US.ISO8859-1',
'cextend.en': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'crh_ua': 'crh_UA.UTF-8',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cs.iso88592': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'cs_cz.iso88592': 'cs_CZ.ISO8859-2',
'csb_pl': 'csb_PL.UTF-8',
'cv_ru': 'cv_RU.UTF-8',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cy_gb.iso88591': 'cy_GB.ISO8859-1',
'cy_gb.iso885914': 'cy_GB.ISO8859-14',
'cy_gb.iso885915': 'cy_GB.ISO8859-15',
'cy_gb@euro': 'cy_GB.ISO8859-15',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da.iso885915': 'da_DK.ISO8859-15',
'da_dk': 'da_DK.ISO8859-1',
'da_dk.88591': 'da_DK.ISO8859-1',
'da_dk.885915': 'da_DK.ISO8859-15',
'da_dk.iso88591': 'da_DK.ISO8859-1',
'da_dk.iso885915': 'da_DK.ISO8859-15',
'da_dk@euro': 'da_DK.ISO8859-15',
'danish': 'da_DK.ISO8859-1',
'danish.iso88591': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de.iso885915': 'de_DE.ISO8859-15',
'de_at': 'de_AT.ISO8859-1',
'de_at.iso88591': 'de_AT.ISO8859-1',
'de_at.iso885915': 'de_AT.ISO8859-15',
'de_at.iso885915@euro': 'de_AT.ISO8859-15',
'de_at.utf8@euro': 'de_AT.UTF-8',
'de_at@euro': 'de_AT.ISO8859-15',
'de_be': 'de_BE.ISO8859-1',
'de_be.iso88591': 'de_BE.ISO8859-1',
'de_be.iso885915': 'de_BE.ISO8859-15',
'de_be.iso885915@euro': 'de_BE.ISO8859-15',
'de_be.utf8@euro': 'de_BE.UTF-8',
'de_be@euro': 'de_BE.ISO8859-15',
'de_ch': 'de_CH.ISO8859-1',
'de_ch.iso88591': 'de_CH.ISO8859-1',
'de_ch.iso885915': 'de_CH.ISO8859-15',
'de_ch@euro': 'de_CH.ISO8859-15',
'de_de': 'de_DE.ISO8859-1',
'de_de.88591': 'de_DE.ISO8859-1',
'de_de.885915': 'de_DE.ISO8859-15',
'de_de.885915@euro': 'de_DE.ISO8859-15',
'de_de.iso88591': 'de_DE.ISO8859-1',
'de_de.iso885915': 'de_DE.ISO8859-15',
'de_de.iso885915@euro': 'de_DE.ISO8859-15',
'de_de.utf8@euro': 'de_DE.UTF-8',
'de_de@euro': 'de_DE.ISO8859-15',
'de_li.utf8': 'de_LI.UTF-8',
'de_lu': 'de_LU.ISO8859-1',
'de_lu.iso88591': 'de_LU.ISO8859-1',
'de_lu.iso885915': 'de_LU.ISO8859-15',
'de_lu.iso885915@euro': 'de_LU.ISO8859-15',
'de_lu.utf8@euro': 'de_LU.UTF-8',
'de_lu@euro': 'de_LU.ISO8859-15',
'deutsch': 'de_DE.ISO8859-1',
'doi_in': 'doi_IN.UTF-8',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'dv_mv': 'dv_MV.UTF-8',
'dz_bt': 'dz_BT.UTF-8',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'ee_ee.iso88594': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_cy': 'el_CY.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr.iso88597': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en.iso88591': 'en_US.ISO8859-1',
'en_ag': 'en_AG.UTF-8',
'en_au': 'en_AU.ISO8859-1',
'en_au.iso88591': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_be@euro': 'en_BE.ISO8859-15',
'en_bw': 'en_BW.ISO8859-1',
'en_bw.iso88591': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_ca.iso88591': 'en_CA.ISO8859-1',
'en_dk': 'en_DK.ISO8859-1',
'en_dl.utf8': 'en_DL.UTF-8',
'en_gb': 'en_GB.ISO8859-1',
'en_gb.88591': 'en_GB.ISO8859-1',
'en_gb.iso88591': 'en_GB.ISO8859-1',
'en_gb.iso885915': 'en_GB.ISO8859-15',
'en_gb@euro': 'en_GB.ISO8859-15',
'en_hk': 'en_HK.ISO8859-1',
'en_hk.iso88591': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_ie.iso88591': 'en_IE.ISO8859-1',
'en_ie.iso885915': 'en_IE.ISO8859-15',
'en_ie.iso885915@euro': 'en_IE.ISO8859-15',
'en_ie.utf8@euro': 'en_IE.UTF-8',
'en_ie@euro': 'en_IE.ISO8859-15',
'en_in': 'en_IN.ISO8859-1',
'en_ng': 'en_NG.UTF-8',
'en_nz': 'en_NZ.ISO8859-1',
'en_nz.iso88591': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_ph.iso88591': 'en_PH.ISO8859-1',
'en_sg': 'en_SG.ISO8859-1',
'en_sg.iso88591': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us.88591': 'en_US.ISO8859-1',
'en_us.885915': 'en_US.ISO8859-15',
'en_us.iso88591': 'en_US.ISO8859-1',
'en_us.iso885915': 'en_US.ISO8859-15',
'en_us.iso885915@euro': 'en_US.ISO8859-15',
'en_us@euro': 'en_US.ISO8859-15',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_za.88591': 'en_ZA.ISO8859-1',
'en_za.iso88591': 'en_ZA.ISO8859-1',
'en_za.iso885915': 'en_ZA.ISO8859-15',
'en_za@euro': 'en_ZA.ISO8859-15',
'en_zm': 'en_ZM.UTF-8',
'en_zw': 'en_ZW.ISO8859-1',
'en_zw.iso88591': 'en_ZW.ISO8859-1',
'en_zw.utf8': 'en_ZS.UTF-8',
'eng_gb': 'en_GB.ISO8859-1',
'eng_gb.8859': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english.iso88591': 'en_EN.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_uk.8859': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'english_us.8859': 'en_US.ISO8859-1',
'english_us.ascii': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo.utf8': 'eo.UTF-8',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_eo.iso88593': 'eo_EO.ISO8859-3',
'eo_us.utf8': 'eo_US.UTF-8',
'eo_xx': 'eo_XX.ISO8859-3',
'eo_xx.iso88593': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_ar.iso88591': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_bo.iso88591': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_cl.iso88591': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_co.iso88591': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cr.iso88591': 'es_CR.ISO8859-1',
'es_cu': 'es_CU.UTF-8',
'es_do': 'es_DO.ISO8859-1',
'es_do.iso88591': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_ec.iso88591': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_es.88591': 'es_ES.ISO8859-1',
'es_es.iso88591': 'es_ES.ISO8859-1',
'es_es.iso885915': 'es_ES.ISO8859-15',
'es_es.iso885915@euro': 'es_ES.ISO8859-15',
'es_es.utf8@euro': 'es_ES.UTF-8',
'es_es@euro': 'es_ES.ISO8859-15',
'es_gt': 'es_GT.ISO8859-1',
'es_gt.iso88591': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_hn.iso88591': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_mx.iso88591': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_ni.iso88591': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pa.iso88591': 'es_PA.ISO8859-1',
'es_pa.iso885915': 'es_PA.ISO8859-15',
'es_pa@euro': 'es_PA.ISO8859-15',
'es_pe': 'es_PE.ISO8859-1',
'es_pe.iso88591': 'es_PE.ISO8859-1',
'es_pe.iso885915': 'es_PE.ISO8859-15',
'es_pe@euro': 'es_PE.ISO8859-15',
'es_pr': 'es_PR.ISO8859-1',
'es_pr.iso88591': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_py.iso88591': 'es_PY.ISO8859-1',
'es_py.iso885915': 'es_PY.ISO8859-15',
'es_py@euro': 'es_PY.ISO8859-15',
'es_sv': 'es_SV.ISO8859-1',
'es_sv.iso88591': 'es_SV.ISO8859-1',
'es_sv.iso885915': 'es_SV.ISO8859-15',
'es_sv@euro': 'es_SV.ISO8859-15',
'es_us': 'es_US.ISO8859-1',
'es_us.iso88591': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_uy.iso88591': 'es_UY.ISO8859-1',
'es_uy.iso885915': 'es_UY.ISO8859-15',
'es_uy@euro': 'es_UY.ISO8859-15',
'es_ve': 'es_VE.ISO8859-1',
'es_ve.iso88591': 'es_VE.ISO8859-1',
'es_ve.iso885915': 'es_VE.ISO8859-15',
'es_ve@euro': 'es_VE.ISO8859-15',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'et_ee.iso88591': 'et_EE.ISO8859-1',
'et_ee.iso885913': 'et_EE.ISO8859-13',
'et_ee.iso885915': 'et_EE.ISO8859-15',
'et_ee.iso88594': 'et_EE.ISO8859-4',
'et_ee@euro': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_es.iso88591': 'eu_ES.ISO8859-1',
'eu_es.iso885915': 'eu_ES.ISO8859-15',
'eu_es.iso885915@euro': 'eu_ES.ISO8859-15',
'eu_es.utf8@euro': 'eu_ES.UTF-8',
'eu_es@euro': 'eu_ES.ISO8859-15',
'eu_fr': 'eu_FR.ISO8859-1',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'ff_sn': 'ff_SN.UTF-8',
'fi': 'fi_FI.ISO8859-15',
'fi.iso885915': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fi_fi.88591': 'fi_FI.ISO8859-1',
'fi_fi.iso88591': 'fi_FI.ISO8859-1',
'fi_fi.iso885915': 'fi_FI.ISO8859-15',
'fi_fi.iso885915@euro': 'fi_FI.ISO8859-15',
'fi_fi.utf8@euro': 'fi_FI.UTF-8',
'fi_fi@euro': 'fi_FI.ISO8859-15',
'fil_ph': 'fil_PH.UTF-8',
'finnish': 'fi_FI.ISO8859-1',
'finnish.iso88591': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fo_fo.iso88591': 'fo_FO.ISO8859-1',
'fo_fo.iso885915': 'fo_FO.ISO8859-15',
'fo_fo@euro': 'fo_FO.ISO8859-15',
'fr': 'fr_FR.ISO8859-1',
'fr.iso885915': 'fr_FR.ISO8859-15',
'fr_be': 'fr_BE.ISO8859-1',
'fr_be.88591': 'fr_BE.ISO8859-1',
'fr_be.iso88591': 'fr_BE.ISO8859-1',
'fr_be.iso885915': 'fr_BE.ISO8859-15',
'fr_be.iso885915@euro': 'fr_BE.ISO8859-15',
'fr_be.utf8@euro': 'fr_BE.UTF-8',
'fr_be@euro': 'fr_BE.ISO8859-15',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ca.88591': 'fr_CA.ISO8859-1',
'fr_ca.iso88591': 'fr_CA.ISO8859-1',
'fr_ca.iso885915': 'fr_CA.ISO8859-15',
'fr_ca@euro': 'fr_CA.ISO8859-15',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_ch.88591': 'fr_CH.ISO8859-1',
'fr_ch.iso88591': 'fr_CH.ISO8859-1',
'fr_ch.iso885915': 'fr_CH.ISO8859-15',
'fr_ch@euro': 'fr_CH.ISO8859-15',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_fr.88591': 'fr_FR.ISO8859-1',
'fr_fr.iso88591': 'fr_FR.ISO8859-1',
'fr_fr.iso885915': 'fr_FR.ISO8859-15',
'fr_fr.iso885915@euro': 'fr_FR.ISO8859-15',
'fr_fr.utf8@euro': 'fr_FR.UTF-8',
'fr_fr@euro': 'fr_FR.ISO8859-15',
'fr_lu': 'fr_LU.ISO8859-1',
'fr_lu.88591': 'fr_LU.ISO8859-1',
'fr_lu.iso88591': 'fr_LU.ISO8859-1',
'fr_lu.iso885915': 'fr_LU.ISO8859-15',
'fr_lu.iso885915@euro': 'fr_LU.ISO8859-15',
'fr_lu.utf8@euro': 'fr_LU.UTF-8',
'fr_lu@euro': 'fr_LU.ISO8859-15',
'fran\xe7ais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'fre_fr.8859': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'french_france.8859': 'fr_FR.ISO8859-1',
'fur_it': 'fur_IT.UTF-8',
'fy_de': 'fy_DE.UTF-8',
'fy_nl': 'fy_NL.UTF-8',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'ga_ie.iso88591': 'ga_IE.ISO8859-1',
'ga_ie.iso885914': 'ga_IE.ISO8859-14',
'ga_ie.iso885915': 'ga_IE.ISO8859-15',
'ga_ie.iso885915@euro': 'ga_IE.ISO8859-15',
'ga_ie.utf8@euro': 'ga_IE.UTF-8',
'ga_ie@euro': 'ga_IE.ISO8859-15',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'gd_gb.iso88591': 'gd_GB.ISO8859-1',
'gd_gb.iso885914': 'gd_GB.ISO8859-14',
'gd_gb.iso885915': 'gd_GB.ISO8859-15',
'gd_gb@euro': 'gd_GB.ISO8859-15',
'ger_de': 'de_DE.ISO8859-1',
'ger_de.8859': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'german_germany.8859': 'de_DE.ISO8859-1',
'gez_er': 'gez_ER.UTF-8',
'gez_et': 'gez_ET.UTF-8',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'gl_es.iso88591': 'gl_ES.ISO8859-1',
'gl_es.iso885915': 'gl_ES.ISO8859-15',
'gl_es.iso885915@euro': 'gl_ES.ISO8859-15',
'gl_es.utf8@euro': 'gl_ES.UTF-8',
'gl_es@euro': 'gl_ES.ISO8859-15',
'greek': 'el_GR.ISO8859-7',
'greek.iso88597': 'el_GR.ISO8859-7',
'gu_in': 'gu_IN.UTF-8',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'gv_gb.iso88591': 'gv_GB.ISO8859-1',
'gv_gb.iso885914': 'gv_GB.ISO8859-14',
'gv_gb.iso885915': 'gv_GB.ISO8859-15',
'gv_gb@euro': 'gv_GB.ISO8859-15',
'ha_ng': 'ha_NG.UTF-8',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'he_il.cp1255': 'he_IL.CP1255',
'he_il.iso88598': 'he_IL.ISO8859-8',
'he_il.microsoftcp1255': 'he_IL.CP1255',
'hebrew': 'he_IL.ISO8859-8',
'hebrew.iso88598': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hr_hr.iso88592': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hsb_de': 'hsb_DE.ISO8859-2',
'ht_ht': 'ht_HT.UTF-8',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hu_hu.iso88592': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'hy_am': 'hy_AM.UTF-8',
'hy_am.armscii8': 'hy_AM.ARMSCII_8',
'ia': 'ia.UTF-8',
'ia_fr': 'ia_FR.UTF-8',
'icelandic': 'is_IS.ISO8859-1',
'icelandic.iso88591': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'ig_ng': 'ig_NG.UTF-8',
'ik_ca': 'ik_CA.UTF-8',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'is_is.iso88591': 'is_IS.ISO8859-1',
'is_is.iso885915': 'is_IS.ISO8859-15',
'is_is@euro': 'is_IS.ISO8859-15',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it.iso885915': 'it_IT.ISO8859-15',
'it_ch': 'it_CH.ISO8859-1',
'it_ch.iso88591': 'it_CH.ISO8859-1',
'it_ch.iso885915': 'it_CH.ISO8859-15',
'it_ch@euro': 'it_CH.ISO8859-15',
'it_it': 'it_IT.ISO8859-1',
'it_it.88591': 'it_IT.ISO8859-1',
'it_it.iso88591': 'it_IT.ISO8859-1',
'it_it.iso885915': 'it_IT.ISO8859-15',
'it_it.iso885915@euro': 'it_IT.ISO8859-15',
'it_it.utf8@euro': 'it_IT.UTF-8',
'it_it@euro': 'it_IT.ISO8859-15',
'italian': 'it_IT.ISO8859-1',
'italian.iso88591': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.iso88598': 'he_IL.ISO8859-8',
'iw_il.utf8': 'iw_IL.UTF-8',
'ja': 'ja_JP.eucJP',
'ja.jis': 'ja_JP.JIS7',
'ja.sjis': 'ja_JP.SJIS',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.ajec': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.eucjp': 'ja_JP.eucJP',
'ja_jp.iso-2022-jp': 'ja_JP.JIS7',
'ja_jp.iso2022jp': 'ja_JP.JIS7',
'ja_jp.jis': 'ja_JP.JIS7',
'ja_jp.jis7': 'ja_JP.JIS7',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.pck': 'ja_JP.SJIS',
'ja_jp.sjis': 'ja_JP.SJIS',
'ja_jp.ujis': 'ja_JP.eucJP',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'japanese.sjis': 'ja_JP.SJIS',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kk_kz': 'kk_KZ.RK1048',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'kl_gl.iso88591': 'kl_GL.ISO8859-1',
'kl_gl.iso885915': 'kl_GL.ISO8859-15',
'kl_gl@euro': 'kl_GL.ISO8859-15',
'km_kh': 'km_KH.UTF-8',
'kn': 'kn_IN.UTF-8',
'kn_in': 'kn_IN.UTF-8',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'ko_kr.euckr': 'ko_KR.eucKR',
'kok_in': 'kok_IN.UTF-8',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in': 'ks_IN.UTF-8',
'ks_in@devanagari': 'ks_IN.UTF-8@devanagari',
    'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari',
'ku_tr': 'ku_TR.ISO8859-9',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'kw_gb.iso88591': 'kw_GB.ISO8859-1',
'kw_gb.iso885914': 'kw_GB.ISO8859-14',
'kw_gb.iso885915': 'kw_GB.ISO8859-15',
'kw_gb@euro': 'kw_GB.ISO8859-15',
'ky': 'ky_KG.UTF-8',
'ky_kg': 'ky_KG.UTF-8',
'lb_lu': 'lb_LU.UTF-8',
'lg_ug': 'lg_UG.ISO8859-10',
'li_be': 'li_BE.UTF-8',
'li_nl': 'li_NL.UTF-8',
'lij_it': 'lij_IT.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lt_lt.iso885913': 'lt_LT.ISO8859-13',
'lt_lt.iso88594': 'lt_LT.ISO8859-4',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'lv_lv.iso885913': 'lv_LV.ISO8859-13',
'lv_lv.iso88594': 'lv_LV.ISO8859-4',
'mag_in': 'mag_IN.UTF-8',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
'mg_mg': 'mg_MG.ISO8859-15',
'mhr_ru': 'mhr_RU.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mi_nz.iso88591': 'mi_NZ.ISO8859-1',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'mk_mk.cp1251': 'mk_MK.CP1251',
'mk_mk.iso88595': 'mk_MK.ISO8859-5',
'mk_mk.microsoftcp1251': 'mk_MK.CP1251',
'ml': 'ml_IN.UTF-8',
'ml_in': 'ml_IN.UTF-8',
'mn_mn': 'mn_MN.UTF-8',
'mni_in': 'mni_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'ms_my.iso88591': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'mt_mt.iso88593': 'mt_MT.ISO8859-3',
'my_mm': 'my_MM.UTF-8',
'nan_tw@latin': 'nan_TW.UTF-8@latin',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nb_no.88591': 'nb_NO.ISO8859-1',
'nb_no.iso88591': 'nb_NO.ISO8859-1',
'nb_no.iso885915': 'nb_NO.ISO8859-15',
'nb_no@euro': 'nb_NO.ISO8859-15',
'nds_de': 'nds_DE.UTF-8',
'nds_nl': 'nds_NL.UTF-8',
'ne_np': 'ne_NP.UTF-8',
'nhn_mx': 'nhn_MX.UTF-8',
'niu_nu': 'niu_NU.UTF-8',
'niu_nz': 'niu_NZ.UTF-8',
'nl': 'nl_NL.ISO8859-1',
'nl.iso885915': 'nl_NL.ISO8859-15',
'nl_aw': 'nl_AW.UTF-8',
'nl_be': 'nl_BE.ISO8859-1',
'nl_be.88591': 'nl_BE.ISO8859-1',
'nl_be.iso88591': 'nl_BE.ISO8859-1',
'nl_be.iso885915': 'nl_BE.ISO8859-15',
'nl_be.iso885915@euro': 'nl_BE.ISO8859-15',
'nl_be.utf8@euro': 'nl_BE.UTF-8',
'nl_be@euro': 'nl_BE.ISO8859-15',
'nl_nl': 'nl_NL.ISO8859-1',
'nl_nl.88591': 'nl_NL.ISO8859-1',
'nl_nl.iso88591': 'nl_NL.ISO8859-1',
'nl_nl.iso885915': 'nl_NL.ISO8859-15',
'nl_nl.iso885915@euro': 'nl_NL.ISO8859-15',
'nl_nl.utf8@euro': 'nl_NL.UTF-8',
'nl_nl@euro': 'nl_NL.ISO8859-15',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'nn_no.88591': 'nn_NO.ISO8859-1',
'nn_no.iso88591': 'nn_NO.ISO8859-1',
'nn_no.iso885915': 'nn_NO.ISO8859-15',
'nn_no@euro': 'nn_NO.ISO8859-15',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.88591': 'no_NO.ISO8859-1',
'no_no.iso88591': 'no_NO.ISO8859-1',
'no_no.iso885915': 'no_NO.ISO8859-15',
'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
'no_no@euro': 'no_NO.ISO8859-15',
'norwegian': 'no_NO.ISO8859-1',
'norwegian.iso88591': 'no_NO.ISO8859-1',
'nr': 'nr_ZA.ISO8859-1',
'nr_za': 'nr_ZA.ISO8859-1',
'nr_za.iso88591': 'nr_ZA.ISO8859-1',
'nso': 'nso_ZA.ISO8859-15',
'nso_za': 'nso_ZA.ISO8859-15',
'nso_za.iso885915': 'nso_ZA.ISO8859-15',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'ny_no.88591': 'ny_NO.ISO8859-1',
'ny_no.iso88591': 'ny_NO.ISO8859-1',
'ny_no.iso885915': 'ny_NO.ISO8859-15',
'ny_no@euro': 'ny_NO.ISO8859-15',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'oc_fr.iso88591': 'oc_FR.ISO8859-1',
'oc_fr.iso885915': 'oc_FR.ISO8859-15',
'oc_fr@euro': 'oc_FR.ISO8859-15',
'om_et': 'om_ET.UTF-8',
'om_ke': 'om_KE.ISO8859-1',
'or': 'or_IN.UTF-8',
'or_in': 'or_IN.UTF-8',
'os_ru': 'os_RU.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pa_pk': 'pa_PK.UTF-8',
'pap_an': 'pap_AN.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_de.iso88591': 'pd_DE.ISO8859-1',
'pd_de.iso885915': 'pd_DE.ISO8859-15',
'pd_de@euro': 'pd_DE.ISO8859-15',
'pd_us': 'pd_US.ISO8859-1',
'pd_us.iso88591': 'pd_US.ISO8859-1',
'pd_us.iso885915': 'pd_US.ISO8859-15',
'pd_us@euro': 'pd_US.ISO8859-15',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'ph_ph.iso88591': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'pl_pl.iso88592': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese.iso88591': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'portuguese_brazil.8859': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'pp_an.iso88591': 'pp_AN.ISO8859-1',
'ps_af': 'ps_AF.UTF-8',
'pt': 'pt_PT.ISO8859-1',
'pt.iso885915': 'pt_PT.ISO8859-15',
'pt_br': 'pt_BR.ISO8859-1',
'pt_br.88591': 'pt_BR.ISO8859-1',
'pt_br.iso88591': 'pt_BR.ISO8859-1',
'pt_br.iso885915': 'pt_BR.ISO8859-15',
'pt_br@euro': 'pt_BR.ISO8859-15',
'pt_pt': 'pt_PT.ISO8859-1',
'pt_pt.88591': 'pt_PT.ISO8859-1',
'pt_pt.iso88591': 'pt_PT.ISO8859-1',
'pt_pt.iso885915': 'pt_PT.ISO8859-15',
'pt_pt.iso885915@euro': 'pt_PT.ISO8859-15',
'pt_pt.utf8@euro': 'pt_PT.UTF-8',
'pt_pt@euro': 'pt_PT.ISO8859-15',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'ro_ro.iso88592': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.UTF-8',
'ru.koi8r': 'ru_RU.KOI8-R',
'ru_ru': 'ru_RU.UTF-8',
'ru_ru.cp1251': 'ru_RU.CP1251',
'ru_ru.iso88595': 'ru_RU.ISO8859-5',
'ru_ru.koi8r': 'ru_RU.KOI8-R',
'ru_ru.microsoftcp1251': 'ru_RU.CP1251',
'ru_ua': 'ru_UA.KOI8-U',
'ru_ua.cp1251': 'ru_UA.CP1251',
'ru_ua.koi8u': 'ru_UA.KOI8-U',
'ru_ua.microsoftcp1251': 'ru_UA.CP1251',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.ISO8859-5',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'rw_rw.iso88591': 'rw_RW.ISO8859-1',
'sa_in': 'sa_IN.UTF-8',
'sat_in': 'sat_IN.UTF-8',
'sc_it': 'sc_IT.UTF-8',
'sd': 'sd_IN.UTF-8',
'sd@devanagari': 'sd_IN.UTF-8@devanagari',
'sd_in': 'sd_IN.UTF-8',
'sd_in@devanagari': 'sd_IN.UTF-8@devanagari',
'[email protected]': 'sd_IN.UTF-8@devanagari',
'sd_pk': 'sd_PK.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
'shs_ca': 'shs_CA.UTF-8',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'sid_et': 'sid_ET.UTF-8',
'sinhala': 'si_LK.UTF-8',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sk_sk.iso88592': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'sl_si.iso88592': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'so_dj': 'so_DJ.ISO8859-1',
'so_et': 'so_ET.UTF-8',
'so_ke': 'so_KE.ISO8859-1',
'so_so': 'so_SO.ISO8859-1',
'sp': 'sr_CS.ISO8859-5',
'sp_yu': 'sr_CS.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish.iso88591': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'spanish_spain.8859': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_al.iso88592': 'sq_AL.ISO8859-2',
'sq_mk': 'sq_MK.UTF-8',
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latin': 'sr_RS.UTF-8@latin',
'sr@latn': 'sr_CS.UTF-8@latin',
'sr_cs': 'sr_CS.UTF-8',
'sr_cs.iso88592': 'sr_CS.ISO8859-2',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs.iso88595': 'sr_CS.ISO8859-5',
'sr_cs.utf8@latn': 'sr_CS.UTF-8@latin',
'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs@latin': 'sr_RS.UTF-8@latin',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
'sr_yu': 'sr_RS.UTF-8@latin',
'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.iso88592': 'sr_CS.ISO8859-2',
'sr_yu.iso88595': 'sr_CS.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.utf8': 'sr_RS.UTF-8',
'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
'sr_yu@cyrillic': 'sr_RS.UTF-8',
'ss': 'ss_ZA.ISO8859-1',
'ss_za': 'ss_ZA.ISO8859-1',
'ss_za.iso88591': 'ss_ZA.ISO8859-1',
'st': 'st_ZA.ISO8859-1',
'st_za': 'st_ZA.ISO8859-1',
'st_za.iso88591': 'st_ZA.ISO8859-1',
'sv': 'sv_SE.ISO8859-1',
'sv.iso885915': 'sv_SE.ISO8859-15',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_fi.iso88591': 'sv_FI.ISO8859-1',
'sv_fi.iso885915': 'sv_FI.ISO8859-15',
'sv_fi.iso885915@euro': 'sv_FI.ISO8859-15',
'sv_fi.utf8@euro': 'sv_FI.UTF-8',
'sv_fi@euro': 'sv_FI.ISO8859-15',
'sv_se': 'sv_SE.ISO8859-1',
'sv_se.88591': 'sv_SE.ISO8859-1',
'sv_se.iso88591': 'sv_SE.ISO8859-1',
'sv_se.iso885915': 'sv_SE.ISO8859-15',
'sv_se@euro': 'sv_SE.ISO8859-15',
'sw_ke': 'sw_KE.UTF-8',
'sw_tz': 'sw_TZ.UTF-8',
'swedish': 'sv_SE.ISO8859-1',
'swedish.iso88591': 'sv_SE.ISO8859-1',
'szl_pl': 'szl_PL.UTF-8',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'ta_lk': 'ta_LK.UTF-8',
'te': 'te_IN.UTF-8',
'te_in': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'tg_tj.koi8c': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.iso885911': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'ti_er': 'ti_ER.UTF-8',
'ti_et': 'ti_ET.UTF-8',
'tig_er': 'tig_ER.UTF-8',
'tk_tm': 'tk_TM.UTF-8',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tl_ph.iso88591': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'tn_za.iso885915': 'tn_ZA.ISO8859-15',
'tr': 'tr_TR.ISO8859-9',
'tr_cy': 'tr_CY.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'tr_tr.iso88599': 'tr_TR.ISO8859-9',
'ts': 'ts_ZA.ISO8859-1',
'ts_za': 'ts_ZA.ISO8859-1',
'ts_za.iso88591': 'ts_ZA.ISO8859-1',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.koi8c': 'tt_RU.KOI8-C',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif',
'turkish': 'tr_TR.ISO8859-9',
'turkish.iso88599': 'tr_TR.ISO8859-9',
'ug_cn': 'ug_CN.UTF-8',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'uk_ua.cp1251': 'uk_UA.CP1251',
'uk_ua.iso88595': 'uk_UA.ISO8859-5',
'uk_ua.koi8u': 'uk_UA.KOI8-U',
'uk_ua.microsoftcp1251': 'uk_UA.CP1251',
'univ': 'en_US.utf',
'universal': 'en_US.utf',
'universal.utf8@ucs4': 'en_US.UTF-8',
'unm_us': 'unm_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_in': 'ur_IN.UTF-8',
'ur_pk': 'ur_PK.CP1256',
'ur_pk.cp1256': 'ur_PK.CP1256',
'ur_pk.microsoftcp1256': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'uz_uz.iso88591': 'uz_UZ.ISO8859-1',
'uz_uz.utf8@cyrillic': 'uz_UZ.UTF-8',
'uz_uz@cyrillic': 'uz_UZ.UTF-8',
've': 've_ZA.UTF-8',
've_za': 've_ZA.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wa_be.iso88591': 'wa_BE.ISO8859-1',
'wa_be.iso885915': 'wa_BE.ISO8859-15',
'wa_be.iso885915@euro': 'wa_BE.ISO8859-15',
'wa_be@euro': 'wa_BE.ISO8859-15',
'wae_ch': 'wae_CH.UTF-8',
'wal_et': 'wal_ET.UTF-8',
'wo_sn': 'wo_SN.UTF-8',
'xh': 'xh_ZA.ISO8859-1',
'xh_za': 'xh_ZA.ISO8859-1',
'xh_za.iso88591': 'xh_ZA.ISO8859-1',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yi_us.cp1255': 'yi_US.CP1255',
'yi_us.microsoftcp1255': 'yi_US.CP1255',
'yo_ng': 'yo_NG.UTF-8',
'yue_hk': 'yue_HK.UTF-8',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_cn.gb18030': 'zh_CN.gb18030',
'zh_cn.gb2312': 'zh_CN.gb2312',
'zh_cn.gbk': 'zh_CN.gbk',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5': 'zh_HK.big5',
'zh_hk.big5hk': 'zh_HK.big5hkscs',
'zh_hk.big5hkscs': 'zh_HK.big5hkscs',
'zh_sg': 'zh_SG.GB2312',
'zh_sg.gbk': 'zh_SG.GBK',
'zh_tw': 'zh_TW.big5',
'zh_tw.big5': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
'zh_tw.euctw': 'zh_TW.eucTW',
'zu': 'zu_ZA.ISO8859-1',
'zu_za': 'zu_ZA.ISO8859-1',
'zu_za.iso88591': 'zu_ZA.ISO8859-1',
}
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to the Python bug tracker at http://bugs.python.org/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0484: "gsw_FR",# Alsatian - France
0x045e: "am_ET", # Amharic - Ethiopia
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x044d: "as_IN", # Assamese - India
0x042c: "az_AZ", # Azeri - Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x046d: "ba_RU", # Bashkir
    0x042d: "eu_ES", # Basque - Spain
0x0423: "be_BY", # Belarusian
    0x0445: "bn_IN", # Bengali - India
0x201a: "bs_BA", # Bosnian - Cyrillic
0x141a: "bs_BA", # Bosnian - Latin
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
# 0x0455: "my_MM", # Burmese - Not supported
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x0483: "co_FR", # Corsican - France
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
    0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x4009: "en_IN", # English - India
0x4409: "en_MY", # English - Malaysia
    0x4809: "en_SG", # English - Singapore
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x046f: "kl_GL", # Greenlandic - Greenland
0x0447: "gu_IN", # Gujarati
0x0468: "ha_NG", # Hausa - Latin
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut - Syllabics
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0453: "kh_KH", # Khmer - Cambodia
0x0486: "qut_GT",# K'iche - Guatemala
0x0487: "rw_RW", # Kinyarwanda - Rwanda
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0454: "lo_LA", # Lao - Lao PDR
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x082e: "dsb_DE",# Lower Sorbian - Germany
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYROM Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei Darussalam
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian - Cyrillic
0x0850: "mn_CN", # Mongolian - PRC
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x045b: "si_LK", # Sinhala - Sri Lanka
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
    0x380a: "es_UY", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x540a: "es_US", # Spanish - United States
# 0x0430: "", # Sutu - Not supported
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0428: "tg_TJ", # Tajik - Cyrillic
0x085f: "tmz_DZ",# Tamazight - Latin
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x0851: "bo_BT", # Tibetan - Bhutan
0x0451: "bo_CN", # Tibetan - PRC
0x041f: "tr_TR", # Turkish
0x0442: "tk_TM", # Turkmen - Cyrillic
0x0480: "ug_CN", # Uighur - Arabic
0x0422: "uk_UA", # Ukrainian
0x042e: "wen_DE",# Upper Sorbian - Germany
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
0x0488: "wo_SN", # Wolof - Senegal
0x0434: "xh_ZA", # Xhosa - South Africa
0x0485: "sah_RU",# Yakut - Cyrillic
0x0478: "ii_CN", # Yi - PRC
0x046a: "yo_NG", # Yoruba - Nigeria
0x0435: "zu_ZA", # Zulu
}
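# Usage sketch (illustrative, not part of the module API): given a Windows
# LCID, e.g. the value returned by the Win32 GetUserDefaultLCID() call, the
# table above maps it to a POSIX-style locale name:
#
#     >>> windows_locale[0x0409]
#     'en_US'
#     >>> windows_locale.get(0xffff, 'unknown LCID')
#     'unknown LCID'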
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print 'Locale defaults as determined by getdefaultlocale():'
print '-'*72
lang, enc = getdefaultlocale()
print 'Language: ', lang or '(undefined)'
print 'Encoding: ', enc or '(undefined)'
print
print 'Locale settings on startup:'
print '-'*72
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
print
print 'Locale settings after calling resetlocale():'
print '-'*72
resetlocale()
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
try:
setlocale(LC_ALL, "")
except:
print 'NOTE:'
print 'setlocale(LC_ALL, "") does not support the default locale'
print 'given in the OS environment variables.'
else:
print
print 'Locale settings after calling setlocale(LC_ALL, ""):'
print '-'*72
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
###
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append("LC_MESSAGES")
if __name__=='__main__':
print 'Locale aliasing:'
print
_print_locale()
print
print 'Number formatting:'
print
_test()
|
mit
|
robertwbrandt/ZarafaAdmin
|
bin/zarafa-permissions.py
|
2
|
7579
|
#!/usr/bin/env python
"""
Python wrapper for zarafa-admin --type group --details group
"""
import argparse, textwrap, fnmatch, datetime
import xml.etree.cElementTree as ElementTree
import subprocess
from multiprocessing import Process, Queue
# Import Brandt Common Utilities
import sys, os
sys.path.append( os.path.realpath( os.path.join( os.path.dirname(__file__), "/opt/brandt/common" ) ) )
import brandt
sys.path.pop()
args = {}
args['cache'] = 15
args['output'] = 'text'
args['group'] = ''
args['delimiter'] = ""
version = 0.3
encoding = 'utf-8'
fieldmappings = (("groupname","Group Name"),("fullname","Fullname"),
("emailaddress","Email Address"),("addressbook","Address Book"))
ldapfieldmappings = (("pr_ec_enabled_features","Enabled Features"),("pr_ec_disabled_features","Disabled Features"))
class customUsageVersion(argparse.Action):
def __init__(self, option_strings, dest, **kwargs):
self.__version = str(kwargs.get('version', ''))
self.__prog = str(kwargs.get('prog', os.path.basename(__file__)))
self.__row = min(int(kwargs.get('max', 80)), brandt.getTerminalSize()[0])
self.__exit = int(kwargs.get('exit', 0))
super(customUsageVersion, self).__init__(option_strings, dest, nargs=0)
def __call__(self, parser, namespace, values, option_string=None):
# print('%r %r %r' % (namespace, values, option_string))
if self.__version:
print self.__prog + " " + self.__version
print "Copyright (C) 2013 Free Software Foundation, Inc."
print "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>."
version = "This program is free software: you can redistribute it and/or modify "
version += "it under the terms of the GNU General Public License as published by "
version += "the Free Software Foundation, either version 3 of the License, or "
version += "(at your option) any later version."
print textwrap.fill(version, self.__row)
version = "This program is distributed in the hope that it will be useful, "
version += "but WITHOUT ANY WARRANTY; without even the implied warranty of "
version += "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the "
version += "GNU General Public License for more details."
print textwrap.fill(version, self.__row)
print "\nWritten by Bob Brandt <[email protected]>."
else:
print "Usage: " + self.__prog + " [options] [username]"
print "Script used to find details about Zarafa user perissions.\n"
print "Options:"
options = []
options.append(("-h, --help", "Show this help message and exit"))
options.append(("-v, --version", "Show program's version number and exit"))
options.append(("-o, --output OUTPUT", "Type of output {text | csv | xml}"))
options.append(("-c, --cache MINUTES", "Cache time. (in minutes)"))
options.append(("-d, --delimiter DELIM", "Character to use instead of TAB for field delimiter"))
options.append(("username", "Filter to apply to usernames."))
length = max( [ len(option[0]) for option in options ] )
for option in options:
description = textwrap.wrap(option[1], (self.__row - length - 5))
print " " + option[0].ljust(length) + " " + description[0]
for n in range(1,len(description)): print " " * (length + 5) + description[n]
exit(self.__exit)
def command_line_args():
global args, version
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-v', '--version', action=customUsageVersion, version=version, max=80)
parser.add_argument('-h', '--help', action=customUsageVersion)
parser.add_argument('-c', '--cache',
required=False,
default=args['cache'],
type=int,
help="Cache time. (in minutes)")
parser.add_argument('-d', '--delimiter',
required=False,
default=args['delimiter'],
type=str,
help="Character to use instead of TAB for field delimiter")
parser.add_argument('-o', '--output',
required=False,
default=args['output'],
choices=['text', 'csv', 'xml'],
help="Display output type.")
parser.add_argument('group',
nargs='?',
default= args['group'],
action='store',
help="Group to retrieve details about.")
args.update(vars(parser.parse_args()))
if args['delimiter']: args['delimiter'] = args['delimiter'][0]
if not args['delimiter'] and args['output'] == "csv": args['delimiter'] = ","
def get_data():
global args
command = 'zarafa-mailbox-permissions --list-permissions-per-folder brandtb'
cachefile = '/tmp/zarafa-mailbox-permissions.cache'
args['cache'] *= 60
age = args['cache'] + 1
try:
age = (datetime.datetime.now() - datetime.datetime.fromtimestamp(os.stat(cachefile).st_mtime)).seconds
except:
pass
if age > args['cache']:
p = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if err: raise IOError(err)
out = out.split("\n")
permissions = []
SendMeetingRequest = ""
delegate = []
for c in reversed(range(len(out))):
if out[c]:
if out[c] == "Folder permissions:":
permissions = out[c:]
del out[c:]
elif out[c][:83] == "Send meeting requests and response only to the delegator, not to the mailbox owner.":
SendMeetingRequest = out[c]
out.pop(c)
elif out[c] == "Delegate information:":
delegate = out[c:]
del out[c:]
else:
out.pop(c)
delegate = delegate[4:-1]
permissions = permissions[4:-1]
print "\n".join(delegate)
print SendMeetingRequest
print "\n".join(permissions)
# print out
# out = out.strip().split('\n')[3:]
# for c in reversed(range(len(out))):
# if out[c]:
# out[c] = out[c].strip()
# if out[c] != "Everyone": continue
# out.pop(c)
# out = sorted(out, key=lambda s: s.lower())
# f = open(cachefile, 'w')
# f.write("\n".join(out))
# f.close()
# else:
# f = open(cachefile, 'r')
# out = f.read().split('\n')
# f.close()
# # Apply groupname filter
# if args['group']:
# for c in reversed(range(len(out))):
# if out[c] and fnmatch.fnmatch(out[c].lower(), args['group'].lower()): continue
# out.pop(c)
return out
# Start program
if __name__ == "__main__":
command_line_args()
exitcode = 0
# try:
permissions = get_data()
# if len(groups) == 1:
# xmldata = zarafa_group(groups[0])
# else:
# xmldata = zarafa_groups(groups)
# if args['output'] == 'xml':
# xml = ElementTree.Element('zarafaadmin')
# xml.append(xmldata)
# print '<?xml version="1.0" encoding="' + encoding + '"?>\n' + ElementTree.tostring(xml, encoding=encoding, method="xml")
# except ( Exception, SystemExit ) as err:
# try:
# exitcode = int(err[0])
# errmsg = str(" ".join(err[1:]))
# except:
# exitcode = -1
# errmsg = str(" ".join(err))
# if args['output'] != 'xml':
# if exitcode != 0: sys.stderr.write( str(err) +'\n' )
# else:
# xml = ElementTree.Element('zarafaadmin')
# xmldata = ElementTree.SubElement(xml, 'error', errorcode = str(exitcode) )
# xmldata.text = errmsg
# print '<?xml version="1.0" encoding="' + encoding + '"?>\n' + ElementTree.tostring(xml, encoding=encoding, method="xml")
# sys.exit(exitcode)
|
gpl-3.0
|
m-labs/milkymist
|
cores/softusb/navre_regress/test_opcodes/test_ANDI.py
|
5
|
3274
|
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: test_ANDI.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the ANDI opcode.
"""
import base_test
from registers import Reg, SREG
class ANDI_TestFail(base_test.TestFail): pass
class base_ANDI(base_test.opcode_test):
"""Generic test case for testing ANDI opcode.
ANDI - Logical AND with Immediate
opcode is '0111 KKKK dddd KKKK' where 16 <= d <= 31, 0 <= K <= 255
Only registers PC, Rd and SREG should be changed.
"""
def setup(self):
# Set SREG to have only V set (opcode should clear it)
self.setup_regs[Reg.SREG] = 1 << SREG.V
# Set the register values
self.setup_regs[self.Rd] = self.Vd
# Return the raw opcode
return 0x7000 | ((self.Rd - 16) << 4) | ((self.Vk & 0xf0) << 4) | (self.Vk & 0xf)
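    # Worked example of the encoding above (a sketch for illustration): with
    # Rd=17 and K=0x2a, dddd = 17-16 = 0001 and KKKK KKKK = 0010 1010, so
    # 0x7000 | (0x1 << 4) | ((0x2a & 0xf0) << 4) | (0x2a & 0xf) == 0x721a,
    # i.e. the bit pattern '0111 0010 0001 1010'.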
def analyze_results(self):
self.reg_changed.extend( [self.Rd, Reg.SREG] )
# check that result is correct
expect = ((self.Vd & self.Vk) & 0xff)
got = self.anal_regs[self.Rd]
if expect != got:
self.fail('ANDI r%02d, 0x%02x: 0x%02x & 0x%02x = (expect=%02x, got=%02x)' % (
self.Rd, self.Vk, self.Vd, self.Vk, expect, got))
expect_sreg = 0
# calculate what we expect sreg to be (I, T, H, V and C should be zero)
V = 0
N = ((expect & 0x80) != 0)
expect_sreg += N << SREG.N
expect_sreg += (N ^ V) << SREG.S
expect_sreg += (expect == 0) << SREG.Z
got_sreg = self.anal_regs[Reg.SREG]
if expect_sreg != got_sreg:
            self.fail('ANDI r%02d, 0x%02x: 0x%02x & 0x%02x -> SREG (expect=%02x, got=%02x)' % (
self.Rd, self.Vk, self.Vd, self.Vk, expect_sreg, got_sreg))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class ANDI_r%02d_v%02x_k%02x_TestFail(ANDI_TestFail): pass
class test_ANDI_r%02d_v%02x_k%02x(base_ANDI):
Rd = %d
Vd = 0x%x
Vk = 0x%x
def fail(self,s):
raise ANDI_r%02d_v%02x_k%02x_TestFail, s
"""
#
# Define a list of test values such that we test all the cases of SREG bits being set.
#
vals = (
( 0x00, 0x00 ),
( 0xff, 0x00 ),
( 0xfe, 0x01 ),
( 0x0f, 0x00 ),
( 0x0f, 0xf0 ),
( 0x01, 0x02 ),
( 0x80, 0x80 )
)
#
# automagically generate the test_ANDI_rNN_vXX_kXX class definitions.
#
code = ''
for d in range(16,32):
for vd,vk in vals:
args = (d,vd,vk)*4
code += template % args
exec code
|
lgpl-3.0
|
SaschaMester/delicium
|
third_party/closure_linter/closure_linter/errors.py
|
99
|
4184
|
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error codes for JavaScript style checker."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
def ByName(name):
"""Get the error code for the given error name.
Args:
name: The name of the error
Returns:
The error code
"""
return globals()[name]
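# Illustrative example (the value is defined below in this file):
#
#   >>> ByName('MISSING_SEMICOLON')
#   10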
# "File-fatal" errors - these errors stop further parsing of a single file
FILE_NOT_FOUND = -1
FILE_DOES_NOT_PARSE = -2
# Spacing
EXTRA_SPACE = 1
MISSING_SPACE = 2
EXTRA_LINE = 3
MISSING_LINE = 4
ILLEGAL_TAB = 5
WRONG_INDENTATION = 6
WRONG_BLANK_LINE_COUNT = 7
# Semicolons
MISSING_SEMICOLON = 10
MISSING_SEMICOLON_AFTER_FUNCTION = 11
ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
REDUNDANT_SEMICOLON = 13
# Miscellaneous
ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_USE_OF_DESC_TAG = 201
NO_BUG_NUMBER_AFTER_BUG_TAG = 202
MISSING_PARAMETER_DOCUMENTATION = 210
EXTRA_PARAMETER_DOCUMENTATION = 211
WRONG_PARAMETER_DOCUMENTATION = 212
MISSING_JSDOC_TAG_TYPE = 213
MISSING_JSDOC_TAG_DESCRIPTION = 214
MISSING_JSDOC_PARAM_NAME = 215
OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
MISSING_RETURN_DOCUMENTATION = 217
UNNECESSARY_RETURN_DOCUMENTATION = 218
MISSING_BRACES_AROUND_TYPE = 219
MISSING_MEMBER_DOCUMENTATION = 220
MISSING_PRIVATE = 221
EXTRA_PRIVATE = 222
INVALID_OVERRIDE_PRIVATE = 223
INVALID_INHERIT_DOC_PRIVATE = 224
MISSING_JSDOC_TAG_THIS = 225
UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_MISSING_VAR_ARGS_TYPE = 234
JSDOC_MISSING_VAR_ARGS_NAME = 235
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
UNNECESSARY_SUPPRESS = 252
# File ending
FILE_MISSING_NEWLINE = 300
FILE_IN_BLOCK = 301
# Interfaces
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# goog.scope - Namespace aliasing
# TODO(nnaze) Add additional errors here and in aliaspass.py
INVALID_USE_OF_GOOG_SCOPE = 600
EXTRA_GOOG_SCOPE_USAGE = 601
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# All ActionScript specific errors should have error number at least 1000.
FUNCTION_MISSING_RETURN_TYPE = 1132
PARAMETER_MISSING_TYPE = 1133
VAR_MISSING_TYPE = 1134
PARAMETER_MISSING_DEFAULT_VALUE = 1135
IMPORTS_NOT_ALPHABETIZED = 1140
IMPORT_CONTAINS_WILDCARD = 1141
UNUSED_IMPORT = 1142
INVALID_TRACE_SEVERITY_LEVEL = 1250
MISSING_TRACE_SEVERITY_LEVEL = 1251
MISSING_TRACE_MESSAGE = 1252
REMOVE_TRACE_BEFORE_SUBMIT = 1253
REMOVE_COMMENT_BEFORE_SUBMIT = 1254
# End of list of ActionScript specific errors.
NEW_ERRORS = frozenset([
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
# Errors added after 2.3.9:
JSDOC_MISSING_VAR_ARGS_TYPE,
JSDOC_MISSING_VAR_ARGS_NAME,
# Errors added after 2.3.13:
])
|
bsd-3-clause
|
willkg/pytest-wholenodeid
|
pytest_wholenodeid.py
|
1
|
1244
|
import sys
from _pytest.terminal import TerminalReporter
import pytest
__version__ = '0.2'
__releasedate__ = '20150826'
class WholeNodeIDTerminalReporter(TerminalReporter):
def _getfailureheadline(self, rep):
if hasattr(rep, 'location'):
fspath, lineno, domain = rep.location
# Use the whole nodeid so you can copy/paste it to run the test
return '::'.join([fspath, domain.replace('.', '::')])
else:
return "test session"
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
'--nowholenodeid', action="store_false", dest="wholenodeid", default=True,
help=(
"disable pytest-wholenodeid"
)
)
@pytest.mark.trylast
def pytest_configure(config):
if config.option.wholenodeid:
        # Get the standard terminal reporter plugin and replace it with ours
standard_reporter = config.pluginmanager.getplugin('terminalreporter')
wholenodeid_reporter = WholeNodeIDTerminalReporter(config, sys.stdout)
config.pluginmanager.unregister(standard_reporter)
config.pluginmanager.register(wholenodeid_reporter, 'terminalreporter')
|
bsd-3-clause
|
jeffreymingyue/ansible
|
lib/ansible/plugins/callback/osx_say.py
|
144
|
2882
|
# (C) 2012, Michael DeHaan, <[email protected]>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import os
from ansible.plugins.callback import CallbackBase
FAILED_VOICE="Zarvox"
REGULAR_VOICE="Trinoids"
HAPPY_VOICE="Cellos"
LASER_VOICE="Princess"
SAY_CMD="/usr/bin/say"
class CallbackModule(CallbackBase):
"""
makes Ansible much more exciting on OS X.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'osx_say'
def __init__(self, display):
super(CallbackModule, self).__init__(display)
        # the plugin disables itself if say is not present
# ansible will not call any callback if disabled is set to True
if not os.path.exists(SAY_CMD):
self.disabled = True
self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__)) )
def say(self, msg, voice):
subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)])
def runner_on_failed(self, host, res, ignore_errors=False):
self.say("Failure on host %s" % host, FAILED_VOICE)
def runner_on_ok(self, host, res):
self.say("pew", LASER_VOICE)
def runner_on_skipped(self, host, item=None):
self.say("pew", LASER_VOICE)
def runner_on_unreachable(self, host, res):
self.say("Failure on host %s" % host, FAILED_VOICE)
def runner_on_async_ok(self, host, res, jid):
self.say("pew", LASER_VOICE)
def runner_on_async_failed(self, host, res, jid):
self.say("Failure on host %s" % host, FAILED_VOICE)
def playbook_on_start(self):
self.say("Running Playbook", REGULAR_VOICE)
def playbook_on_notify(self, host, handler):
self.say("pew", LASER_VOICE)
def playbook_on_task_start(self, name, is_conditional):
if not is_conditional:
self.say("Starting task: %s" % name, REGULAR_VOICE)
else:
self.say("Notifying task: %s" % name, REGULAR_VOICE)
def playbook_on_setup(self):
self.say("Gathering facts", REGULAR_VOICE)
def playbook_on_play_start(self, name):
self.say("Starting play: %s" % name, HAPPY_VOICE)
def playbook_on_stats(self, stats):
self.say("Play complete", HAPPY_VOICE)
|
gpl-3.0
|
Elettronik/SickRage
|
lib/rtorrent/lib/xmlrpc/basic_auth.py
|
95
|
2681
|
#
# Copyright (c) 2013 Dean Gardiner, <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from base64 import encodestring
import string
import xmlrpclib
class BasicAuthTransport(xmlrpclib.Transport):
def __init__(self, username=None, password=None):
xmlrpclib.Transport.__init__(self)
self.username = username
self.password = password
def send_auth(self, h):
if self.username is not None and self.password is not None:
h.putheader('AUTHORIZATION', "Basic %s" % string.replace(
encodestring("%s:%s" % (self.username, self.password)),
"\012", ""
))
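        # e.g. username='user', password='pass' (illustrative values) yields
        # 'AUTHORIZATION: Basic dXNlcjpwYXNz', base64 of 'user:pass' with the
        # trailing newline that encodestring appends stripped out.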
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_auth(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except xmlrpclib.Fault:
raise
except Exception:
self.close()
raise
        # discard any response data and raise exception
if response.getheader("content-length", 0):
response.read()
raise xmlrpclib.ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
|
gpl-3.0
|
ACBL-Bridge/Bridge-Application
|
Logic/card_value.py
|
1
|
1433
|
from simple_deck import *
from verbose import *
class CardValue:
#Dictionary of values of the cards used for the ACBL API.
trueval = {'c2': 1,
'c3': 2,
'c4': 3,
'c5': 4,
'c6': 5,
'c7': 6,
'c8': 7,
'c9': 8,
'ct': 9,
'cj': 10,
'cq': 11,
'ck': 12,
'ca': 13,
'd2': 14,
'd3': 15,
'd4': 16,
'd5': 17,
'd6': 18,
'd7': 19,
'd8': 20,
'd9': 21,
'dt': 22,
'dj': 23,
'dq': 24,
'dk': 25,
'da': 26,
'h2': 27,
'h3': 28,
'h4': 29,
'h5': 30,
'h6': 31,
'h7': 32,
'h8': 33,
'h9': 34,
'ht': 35,
'hj': 36,
'hq': 37,
'hk': 38,
'ha': 39,
's2': 40,
's3': 41,
's4': 42,
's5': 43,
's6': 44,
's7': 45,
's8': 46,
's9': 47,
'st': 48,
'sj': 49,
'sq': 50,
'sk': 51,
'sa': 52
}
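    # Encoding sketch: ranks run 2..ace within each suit and suits are ordered
    # clubs < diamonds < hearts < spades, so trueval['c2'] == 1,
    # trueval['ca'] == 13 and trueval['sa'] == 52.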
|
mit
|
ZhangXinNan/tensorflow
|
tensorflow/python/kernel_tests/pool_test.py
|
24
|
14407
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unified pooling functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def pool_direct_single_axis(
input, # pylint: disable=redefined-builtin
axis,
window_size,
pooling_type,
padding,
dilation_rate,
stride):
"""Numpy implementation of pooling along a single axis.
This is intended for testing only, and therefore isn't particularly efficient.
See pool_direct below for the meaning of the arguments.
Args:
input: numpy array.
axis: axis along which to perform pooling.
window_size: int >= 1. Size of pooling window within axis.
pooling_type: either "MAX" or "AVG".
padding: either "SAME" or "VALID".
dilation_rate: int >= 1. Dilation factor for window, i.e. stride at which
to sample input.
stride: int >= 1. Stride at which to generate output.
Returns:
pooling output array of rank N+2.
Raises:
ValueError: if arguments are invalid.
"""
effective_window_size = (window_size - 1) * dilation_rate + 1
input_size = input.shape[axis]
if padding == "SAME":
output_size = int(math.ceil(input_size / stride))
total_padding_amount = max(
0, (output_size - 1) * stride + effective_window_size - input_size)
before_padding = total_padding_amount // 2
elif padding == "VALID":
output_size = int(
math.ceil((input_size - effective_window_size + 1) / stride))
before_padding = 0
else:
raise ValueError("Unsupported padding type: %r" % (padding,))
output_shape = input.shape[:axis] + (output_size,) + input.shape[axis + 1:]
output = np.zeros(output_shape, input.dtype)
initial_dim_selector = tuple(np.s_[:] for _ in range(axis))
if pooling_type == "MAX":
pooling_func = np.max
elif pooling_type == "AVG":
pooling_func = np.mean
else:
raise ValueError("Unsupported pooling type: %r" % (pooling_type,))
for output_pos in range(output_size):
input_start_pos = output_pos * stride - before_padding
input_end_pos = min(input_start_pos + effective_window_size, input_size)
if input_start_pos < 0:
input_start_pos += dilation_rate
input_slice = np.s_[input_start_pos:input_end_pos:dilation_rate]
output[initial_dim_selector + (output_pos,)] = pooling_func(
input[initial_dim_selector + (input_slice,)], axis=axis)
return output
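# Sanity check of the padding arithmetic above (illustrative numbers): with
# input_size=10, window_size=3, dilation_rate=1 and stride=2, "VALID" gives
# effective_window_size=3 and output_size=ceil((10-3+1)/2)=4, while "SAME"
# gives output_size=ceil(10/2)=5 with total_padding_amount=max(0, 4*2+3-10)=1
# and before_padding=0.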
def pool_direct(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding, # pylint: disable=redefined-builtin
dilation_rate,
strides,
data_format=None):
"""Numpy implementation of pooling.
This is intended for testing only, and therefore isn't particularly efficient.
See tensorflow.nn.pool.
Args:
input: numpy array of rank N+2.
window_shape: Sequence of N ints >= 1.
pooling_type: either "MAX" or "AVG".
padding: either "SAME" or "VALID".
dilation_rate: Sequence of N ints >= 1.
strides: Sequence of N ints >= 1.
data_format: If specified and starts with "NC", indicates that second
dimension, rather than the last dimension, specifies the channel.
Returns:
pooling output array of rank N+2.
Raises:
ValueError: if arguments are invalid.
"""
if data_format is None or not data_format.startswith("NC"):
spatial_start_dim = 1
else:
spatial_start_dim = 2
output = input
for i in range(len(window_shape)):
output = pool_direct_single_axis(
input=output,
axis=i + spatial_start_dim,
window_size=window_shape[i],
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilation_rate[i],
stride=strides[i])
return output
class PoolingTest(test.TestCase):
def _test(self, input_shape, **kwargs):
# Use negative numbers to make sure there isn't any zero padding getting
# used.
x = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
y1 = pool_direct(input=x, **kwargs)
y2 = nn_ops.pool(input=x, **kwargs)
self.assertAllClose(y1, y2.eval(), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
self._test(
input_shape=[1, 1, 10, 1],
window_shape=[1, 3],
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=[1, 2])
def testPool1D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 2], [2, 10, 2]]:
for window_shape in [[1], [2], [3]]:
if padding != "SAME":
for dilation_rate in [[1], [2], [3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2], [3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testPool2D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 2], [2, 10, 9, 2]]:
for window_shape in [[1, 1], [2, 1], [2, 3]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [1, 2], [2, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testPool3D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 11, 2], [2, 10, 9, 11, 2]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 3, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [1, 2, 2],
[2, 3, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [1, 2, 2], [2, 3, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
def testPoolNC(self):
if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[1],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[2],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2],
dilation_rate=[1, 1],
data_format="NCHW")
self._test(
input_shape=[2, 2, 7, 5, 3],
window_shape=[2, 2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2, 1],
dilation_rate=[1, 1, 1],
data_format="NCDHW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding="VALID",
pooling_type="MAX",
strides=[1, 1],
dilation_rate=[2, 2],
data_format="NCHW")
def _test_gradient(self, input_shape, **kwargs):
x_val = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
output = nn_ops.pool(input=x, **kwargs)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error(
[x], [input_shape], output, y_shape, x_init_value=[x_val])
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
def testGradient1D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 5, 2], [1, 4, 1]]:
for window_shape in [[1], [2]]:
if padding != "SAME":
for dilation_rate in [[1], [2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testGradient2D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 4, 5, 2], [1, 5, 4, 1]]:
for window_shape in [[1, 1], [2, 1], [2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testGradient3D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[1, 3, 5, 4, 1], [1, 5, 4, 3, 1]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
tgalal/yowsup
|
yowsup/layers/protocol_acks/protocolentities/ack_incoming.py
|
70
|
1304
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .ack import AckProtocolEntity
class IncomingAckProtocolEntity(AckProtocolEntity):
'''
<ack t="{{TIMESTAMP}}" from="{{FROM_JID}}" id="{{MESSAGE_ID}}" class="{{message | receipt | ?}}">
</ack>
'''
def __init__(self, _id, _class, _from, timestamp):
super(IncomingAckProtocolEntity, self).__init__(_id, _class)
self.setIncomingData(_from, timestamp)
def setIncomingData(self, _from, timestamp):
self._from = _from
self.timestamp = timestamp
def toProtocolTreeNode(self):
node = super(IncomingAckProtocolEntity, self).toProtocolTreeNode()
node.setAttribute("from", self._from)
node.setAttribute("t", self.timestamp)
return node
def __str__(self):
out = super(IncomingAckProtocolEntity, self).__str__()
out += "From: %s\n" % self._from
out += "timestamp: %s\n" % self.timestamp
return out
@staticmethod
def fromProtocolTreeNode(node):
entity = AckProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = IncomingAckProtocolEntity
entity.setIncomingData(
node.getAttributeValue("from"),
node.getAttributeValue("t")
)
return entity
|
gpl-3.0
|
Orochimarufan/youtube-dl
|
youtube_dl/extractor/ondemandkorea.py
|
62
|
2036
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
js_to_json,
)
class OnDemandKoreaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?P<id>[^/]+)\.html'
_GEO_COUNTRIES = ['US', 'CA']
_TEST = {
'url': 'http://www.ondemandkorea.com/ask-us-anything-e43.html',
'info_dict': {
'id': 'ask-us-anything-e43',
'ext': 'mp4',
'title': 'Ask Us Anything : E43',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': 'm3u8 download'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id, fatal=False)
if not webpage:
# Page sometimes returns captcha page with HTTP 403
raise ExtractorError(
'Unable to access page. You may have been blocked.',
expected=True)
if 'msg_block_01.png' in webpage:
self.raise_geo_restricted(
msg='This content is not available in your region',
countries=self._GEO_COUNTRIES)
if 'This video is only available to ODK PLUS members.' in webpage:
raise ExtractorError(
'This video is only available to ODK PLUS members.',
expected=True)
title = self._og_search_title(webpage)
jw_config = self._parse_json(
self._search_regex(
r'(?s)jwplayer\(([\'"])(?:(?!\1).)+\1\)\.setup\s*\((?P<options>.+?)\);',
webpage, 'jw config', group='options'),
video_id, transform_source=js_to_json)
info = self._parse_jwplayer_data(
jw_config, video_id, require_title=False, m3u8_id='hls',
base_url=url)
info.update({
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
})
return info
|
unlicense
|
Jaccorot/django-cms
|
cms/south_migrations/0053_auto__add_field_title_published__add_field_title_publisher_is_draft__a.py
|
63
|
20404
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Title.published'
db.add_column(u'cms_title', 'published',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Title.publisher_is_draft'
db.add_column(u'cms_title', 'publisher_is_draft',
self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True),
keep_default=False)
# Adding field 'Title.publisher_public'
db.add_column(u'cms_title', 'publisher_public',
self.gf('django.db.models.fields.related.OneToOneField')(related_name='publisher_draft', unique=True, null=True, to=orm['cms.Title']),
keep_default=False)
# Adding field 'Title.publisher_state'
db.add_column(u'cms_title', 'publisher_state',
self.gf('django.db.models.fields.SmallIntegerField')(default=0, db_index=True),
keep_default=False)
# Adding field 'Page.published_languages'
db.add_column(u'cms_page', 'published_languages',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Page.languages'
db.add_column(u'cms_page', 'languages',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Title.published'
db.delete_column(u'cms_title', 'published')
# Deleting field 'Title.publisher_is_draft'
db.delete_column(u'cms_title', 'publisher_is_draft')
# Deleting field 'Title.publisher_public'
db.delete_column(u'cms_title', 'publisher_public_id')
# Deleting field 'Title.publisher_state'
db.delete_column(u'cms_title', 'publisher_state')
# Deleting field 'Page.published_languages'
db.delete_column(u'cms_page', 'published_languages')
# Deleting field 'Page.languages'
db.delete_column(u'cms_page', 'languages')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published_languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
bsd-3-clause
|
robertjpayne/rethinkdb
|
test/scenarios/kill_secondary.py
|
12
|
3268
|
#!/usr/bin/env python
# Copyright 2010-2016 RethinkDB, all rights reserved.
import os, sys, time
from pprint import pformat
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import rdb_unittest, scenario_common, utils, vcoptparse, workload_runner
from utils import print_with_time
numNodes = 3
op = vcoptparse.OptParser()
workload_runner.prepare_option_parser_for_split_or_continuous_workload(op)
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)
_, command_prefix, server_options = scenario_common.parse_mode_flags(opts)
class KillSecondary(rdb_unittest.RdbTestCase):
shards = 1
replicas = numNodes
server_command_prefix = command_prefix
server_extra_options = server_options
def test_kill_secondary(self):
primary = self.getPrimaryForShard(0)
secondary = self.getReplicaForShard(0)
conn = self.r.connect(host=primary.host, port=primary.driver_port)
issues = list(self.r.db('rethinkdb').table('current_issues').filter(self.r.row["type"] != "memory_error").run(self.conn))
self.assertEqual(issues, [], 'The issues list was not empty:\n%r' % utils.RePrint.pformat(issues))
workload_ports = workload_runner.RDBPorts(host=primary.host, http_port=primary.http_port, rdb_port=primary.driver_port, db_name=self.dbName, table_name=self.tableName)
with workload_runner.SplitOrContinuousWorkload(opts, workload_ports) as workload:
print_with_time("Starting workload")
workload.run_before()
self.cluster.check()
issues = list(self.r.db('rethinkdb').table('current_issues').filter(self.r.row["type"] != "memory_error").run(self.conn))
self.assertEqual(issues, [], 'The issues list was not empty:\n%r' % utils.RePrint.pformat(issues))
print_with_time("Killing the secondary")
secondary.kill()
print_with_time("Checking that the table_availability issue shows up")
deadline = time.time() + 5
last_error = None
while time.time() < deadline:
try:
issues = list(self.r.db('rethinkdb').table('current_issues').filter({'type':'table_availability', 'info':{'db':self.dbName, 'table':self.tableName}}).run(conn))
self.assertEqual(len(issues), 1, 'The server did not record the single issue for the killed secondary server:\n%s' % pformat(issues))
issue = issues[0]
self.assertEqual(issue['critical'], False)
self.assertEqual(issue['info']['status']['ready_for_reads'], True)
self.assertEqual(issue['info']['status']['ready_for_writes'], True)
break
except Exception as e:
last_error = e
time.sleep(.2)
else:
raise last_error
print_with_time("Running after workload")
workload.run_after()
print_with_time("Done")
# ===== main
if __name__ == '__main__':
rdb_unittest.main()
|
apache-2.0
|
MattsFleaMarket/python-for-android
|
python3-alpha/python3-src/Lib/test/test_winsound.py
|
60
|
9242
|
# Ridiculously simple test of the winsound module for Windows.
import unittest
from test import support
support.requires('audio')
import time
import os
import subprocess
winsound = support.import_module('winsound')
ctypes = support.import_module('ctypes')
import winreg
def has_sound(sound):
"""Find out if a particular event is configured with a default sound"""
try:
# Ask the mixer API for the number of devices it knows about.
# When there are no devices, PlaySound will fail.
        if ctypes.windll.winmm.mixerGetNumDevs() == 0:
return False
key = winreg.OpenKeyEx(winreg.HKEY_CURRENT_USER,
"AppEvents\Schemes\Apps\.Default\{0}\.Default".format(sound))
value = winreg.EnumValue(key, 0)[1]
        return value != ""
except WindowsError:
return False
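# For reference: has_sound("SystemAsterisk") reports whether the current user
# profile maps a .wav file to the SystemAsterisk event in the registry; the
# skipUnless decorators on the PlaySoundTest alias tests below rely on this.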
class BeepTest(unittest.TestCase):
# As with PlaySoundTest, incorporate the _have_soundcard() check
# into our test methods. If there's no audio device present,
# winsound.Beep returns 0 and GetLastError() returns 127, which
# is: ERROR_PROC_NOT_FOUND ("The specified procedure could not
# be found"). (FWIW, virtual/Hyper-V systems fall under this
# scenario as they have no sound devices whatsoever (not even
# a legacy Beep device).)
def test_errors(self):
self.assertRaises(TypeError, winsound.Beep)
self.assertRaises(ValueError, winsound.Beep, 36, 75)
self.assertRaises(ValueError, winsound.Beep, 32768, 75)
def test_extremes(self):
self._beep(37, 75)
self._beep(32767, 75)
def test_increasingfrequency(self):
for i in range(100, 2000, 100):
self._beep(i, 75)
def _beep(self, *args):
# these tests used to use _have_soundcard(), but it's quite
# possible to have a soundcard, and yet have the beep driver
# disabled. So basically, we have no way of knowing whether
# a beep should be produced or not, so currently if these
# tests fail we're ignoring them
#
# XXX the right fix for this is to define something like
# _have_enabled_beep_driver() and use that instead of the
# try/except below
try:
winsound.Beep(*args)
except RuntimeError:
pass
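# A possible shape for the _have_enabled_beep_driver() helper suggested in the
# XXX note inside _beep (a sketch only -- treating any RuntimeError from a
# short Beep as "no enabled beep driver" is an assumption, not verified
# behaviour on real hardware):
#
#   def _have_enabled_beep_driver():
#       try:
#           winsound.Beep(37, 75)  # lowest frequency the API accepts
#       except RuntimeError:
#           return False
#       return True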
class MessageBeepTest(unittest.TestCase):
def tearDown(self):
time.sleep(0.5)
def test_default(self):
self.assertRaises(TypeError, winsound.MessageBeep, "bad")
self.assertRaises(TypeError, winsound.MessageBeep, 42, 42)
winsound.MessageBeep()
def test_ok(self):
winsound.MessageBeep(winsound.MB_OK)
def test_asterisk(self):
winsound.MessageBeep(winsound.MB_ICONASTERISK)
def test_exclamation(self):
winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
def test_hand(self):
winsound.MessageBeep(winsound.MB_ICONHAND)
def test_question(self):
winsound.MessageBeep(winsound.MB_ICONQUESTION)
class PlaySoundTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(TypeError, winsound.PlaySound)
self.assertRaises(TypeError, winsound.PlaySound, "bad", "bad")
self.assertRaises(
RuntimeError,
winsound.PlaySound,
"none", winsound.SND_ASYNC | winsound.SND_MEMORY
)
@unittest.skipUnless(has_sound("SystemAsterisk"),
"No default SystemAsterisk")
def test_alias_asterisk(self):
if _have_soundcard():
winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemAsterisk', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemExclamation"),
"No default SystemExclamation")
def test_alias_exclamation(self):
if _have_soundcard():
winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemExclamation', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemExit"), "No default SystemExit")
def test_alias_exit(self):
if _have_soundcard():
winsound.PlaySound('SystemExit', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemExit', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemHand"), "No default SystemHand")
def test_alias_hand(self):
if _have_soundcard():
winsound.PlaySound('SystemHand', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemHand', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemQuestion"),
"No default SystemQuestion")
def test_alias_question(self):
if _have_soundcard():
winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemQuestion', winsound.SND_ALIAS
)
def test_alias_fallback(self):
# This test can't be expected to work on all systems. The MS
# PlaySound() docs say:
#
# If it cannot find the specified sound, PlaySound uses the
# default system event sound entry instead. If the function
# can find neither the system default entry nor the default
# sound, it makes no sound and returns FALSE.
#
# It's known to return FALSE on some real systems.
# winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS)
return
def test_alias_nofallback(self):
if _have_soundcard():
# Note that this is not the same as asserting RuntimeError
# will get raised: you cannot convert this to
# self.assertRaises(...) form. The attempt may or may not
# raise RuntimeError, but it shouldn't raise anything other
# than RuntimeError, and that's all we're trying to test
# here. The MS docs aren't clear about whether the SDK
# PlaySound() with SND_ALIAS and SND_NODEFAULT will return
# True or False when the alias is unknown. On Tim's WinXP
# box today, it returns True (no exception is raised). What
# we'd really like to test is that no sound is played, but
# that requires first wiring an eardrum class into unittest
# <wink>.
try:
winsound.PlaySound(
'!"$%&/(#+*',
winsound.SND_ALIAS | winsound.SND_NODEFAULT
)
except RuntimeError:
pass
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'!"$%&/(#+*', winsound.SND_ALIAS | winsound.SND_NODEFAULT
)
def test_stopasync(self):
if _have_soundcard():
winsound.PlaySound(
'SystemQuestion',
winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP
)
time.sleep(0.5)
try:
winsound.PlaySound(
'SystemQuestion',
winsound.SND_ALIAS | winsound.SND_NOSTOP
)
except RuntimeError:
pass
else: # the first sound might already be finished
pass
winsound.PlaySound(None, winsound.SND_PURGE)
else:
# Issue 8367: PlaySound(None, winsound.SND_PURGE)
# does not raise on systems without a sound card.
pass
def _get_cscript_path():
"""Return the full path to cscript.exe or None."""
for dir in os.environ.get("PATH", "").split(os.pathsep):
cscript_path = os.path.join(dir, "cscript.exe")
if os.path.exists(cscript_path):
return cscript_path
__have_soundcard_cache = None
def _have_soundcard():
"""Return True iff this computer has a soundcard."""
global __have_soundcard_cache
if __have_soundcard_cache is None:
cscript_path = _get_cscript_path()
if cscript_path is None:
# Could not find cscript.exe to run our VBScript helper. Default
# to True: most computers these days *do* have a soundcard.
return True
check_script = os.path.join(os.path.dirname(__file__),
"check_soundcard.vbs")
p = subprocess.Popen([cscript_path, check_script],
stdout=subprocess.PIPE)
__have_soundcard_cache = not p.wait()
p.stdout.close()
return __have_soundcard_cache
def test_main():
support.run_unittest(BeepTest, MessageBeepTest, PlaySoundTest)
if __name__=="__main__":
test_main()
|
apache-2.0
|
Karm/qpid-proton
|
examples/python/direct_recv.py
|
33
|
2189
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import optparse
from proton.handlers import MessagingHandler
from proton.reactor import Container
class Recv(MessagingHandler):
def __init__(self, url, count):
super(Recv, self).__init__()
self.url = url
self.expected = count
self.received = 0
def on_start(self, event):
self.acceptor = event.container.listen(self.url)
def on_message(self, event):
if event.message.id and event.message.id < self.received:
# ignore duplicate message
return
if self.expected == 0 or self.received < self.expected:
print(event.message.body)
self.received += 1
if self.received == self.expected:
event.receiver.close()
event.connection.close()
self.acceptor.close()
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.add_option("-a", "--address", default="localhost:5672/examples",
help="address from which messages are received (default %default)")
parser.add_option("-m", "--messages", type="int", default=100,
help="number of messages to receive; 0 receives indefinitely (default %default)")
opts, args = parser.parse_args()
try:
Container(Recv(opts.address, opts.messages)).run()
except KeyboardInterrupt: pass
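# Example run (matches the option defaults above): listen on
# localhost:5672/examples and print 100 messages, or override both, e.g.:
#   python direct_recv.py -a localhost:8888/examples -m 10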
|
apache-2.0
|
rajalokan/glance
|
releasenotes/source/conf.py
|
2
|
9174
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Glance Release Notes'
copyright = u'2015, Glance Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from glance.version import version_info as glance_version
# The full version, including alpha/beta/rc tags.
release = glance_version.version_string_with_vcs()
# The short X.Y version.
version = glance_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GlanceReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
u'Glance Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
[u'Glance Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
u'Glance Developers', 'GlanceReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
|
apache-2.0
|
cainmatt/django
|
tests/user_commands/management/commands/hal.py
|
372
|
1024
|
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Useless command."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*',
            help='Specify the app label(s) to work on.')
parser.add_argument('--empty', action='store_true', dest='empty', default=False,
help="Do nothing.")
def handle(self, *app_labels, **options):
app_labels = set(app_labels)
if options['empty']:
self.stdout.write("Dave, I can't do that.")
return
if not app_labels:
raise CommandError("I'm sorry Dave, I'm afraid I can't do that.")
        # raise an error if a --parameter has leaked from options into args
for app_label in app_labels:
if app_label.startswith('--'):
raise CommandError("Sorry, Dave, I can't let you do that.")
self.stdout.write("Dave, my mind is going. I can feel it. I can feel it.")
|
bsd-3-clause
|
BT-rmartin/server-tools
|
module_prototyper/models/ir_model_fields.py
|
26
|
1926
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
from openerp.tools.translate import _
class ir_model_fields(models.Model):
"""Addition of text fields to fields."""
_inherit = "ir.model.fields"
notes = fields.Text('Notes to developers.')
helper = fields.Text('Helper')
# TODO: Make column1 and 2 required if a model has a m2m to itself
column1 = fields.Char(
'Column1',
help=_("name of the column referring to 'these' records in the "
"relation table"),
)
column2 = fields.Char(
'Column2',
help=_("name of the column referring to 'those' records in the "
"relation table"),
)
limit = fields.Integer('Read limit', help=_("Read limit"))
client_context = fields.Char(
'Context',
help=_("Context to use on the client side when handling the field "
"(python dictionary)"),
)
|
agpl-3.0
|
dragonflypl/material-start
|
node_modules/protractor/node_modules/accessibility-developer-tools/scripts/parse_aria_schemas.py
|
381
|
3069
|
import json
import re
import urllib
import xml.etree.ElementTree as ET
def parse_attributes():
schema = urllib.urlopen('http://www.w3.org/MarkUp/SCHEMA/aria-attributes-1.xsd')
tree = ET.parse(schema)
for node in tree.iter():
node.tag = re.sub(r'{.*}', r'', node.tag)
type_map = {
'states': 'state',
'props': 'property'
}
properties = {}
groups = tree.getroot().findall('attributeGroup')
print groups
for group in groups:
print(group.get('name'))
name_match = re.match(r'ARIA\.(\w+)\.attrib', group.get('name'))
if not name_match:
continue
group_type = name_match.group(1)
print group_type
if group_type not in type_map:
continue
type = type_map[group_type]
for child in group:
name = re.sub(r'aria-', r'', child.attrib['name'])
property = {}
property['type'] = type
if 'type' in child.attrib:
valueType = re.sub(r'xs:', r'', child.attrib['type'])
if valueType == 'IDREF':
property['valueType'] = 'idref'
elif valueType == 'IDREFS':
property['valueType'] = 'idref_list'
else:
property['valueType'] = valueType
else:
type_spec = child.findall('simpleType')[0]
restriction_spec = type_spec.findall('restriction')[0]
base = restriction_spec.attrib['base']
if base == 'xs:NMTOKENS':
property['valueType'] = 'token_list'
elif base == 'xs:NMTOKEN':
property['valueType'] = 'token'
else:
raise Exception('Unknown value type: %s' % base)
values = []
for value_type in restriction_spec:
values.append(value_type.get('value'))
property['values'] = values
if 'default' in child.attrib:
property['defaultValue'] = child.attrib['default']
properties[name] = property
return json.dumps(properties, sort_keys=True, indent=4, separators=(',', ': '))
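# Illustrative shape of the JSON produced above (hypothetical entry, not
# verbatim output of the current W3C schema):
#
# {
#     "atomic": {
#         "type": "property",
#         "valueType": "token",
#         "values": ["true", "false"],
#         "defaultValue": "false"
#     }
# }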
if __name__ == "__main__":
attributes_json = parse_attributes()
constants_file = open('src/js/Constants.js', 'r')
new_constants_file = open('src/js/Constants.new.js', 'w')
in_autogen_block = False
for line in constants_file:
if not in_autogen_block:
new_constants_file.write('%s' % line)
if re.match(r'// BEGIN ARIA_PROPERTIES_AUTOGENERATED', line):
in_autogen_block = True
if re.match(r'// END ARIA_PROPERTIES_AUTOGENERATED', line):
break
new_constants_file.write('/** @type {Object.<string, Object>} */\n')
new_constants_file.write('axs.constants.ARIA_PROPERTIES = %s;\n' % attributes_json)
new_constants_file.write('// END ARIA_PROPERTIES_AUTOGENERATED\n')
for line in constants_file:
new_constants_file.write('%s' % line)
|
mit
|
bikong2/django
|
tests/model_fields/test_field_flags.py
|
154
|
7318
|
from django import test
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.db import models
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ForeignObjectRel, ManyToManyField, ManyToOneRel,
OneToOneField,
)
from .models import AllFieldsModel
NON_CONCRETE_FIELDS = (
ForeignObject,
GenericForeignKey,
GenericRelation,
)
NON_EDITABLE_FIELDS = (
models.BinaryField,
GenericForeignKey,
GenericRelation,
)
RELATION_FIELDS = (
ForeignKey,
ForeignObject,
ManyToManyField,
OneToOneField,
GenericForeignKey,
GenericRelation,
)
MANY_TO_MANY_CLASSES = {
ManyToManyField,
}
MANY_TO_ONE_CLASSES = {
ForeignObject,
ForeignKey,
GenericForeignKey,
}
ONE_TO_MANY_CLASSES = {
ForeignObjectRel,
ManyToOneRel,
GenericRelation,
}
ONE_TO_ONE_CLASSES = {
OneToOneField,
}
FLAG_PROPERTIES = (
'concrete',
'editable',
'is_relation',
'model',
'hidden',
'one_to_many',
'many_to_one',
'many_to_many',
'one_to_one',
'related_model',
)
FLAG_PROPERTIES_FOR_RELATIONS = (
'one_to_many',
'many_to_one',
'many_to_many',
'one_to_one',
)
class FieldFlagsTests(test.SimpleTestCase):
@classmethod
def setUpClass(cls):
super(FieldFlagsTests, cls).setUpClass()
cls.fields = (
list(AllFieldsModel._meta.fields) +
list(AllFieldsModel._meta.virtual_fields)
)
cls.all_fields = (
cls.fields +
list(AllFieldsModel._meta.many_to_many) +
list(AllFieldsModel._meta.virtual_fields)
)
cls.fields_and_reverse_objects = (
cls.all_fields +
list(AllFieldsModel._meta.related_objects)
)
def test_each_field_should_have_a_concrete_attribute(self):
self.assertTrue(all(f.concrete.__class__ == bool for f in self.fields))
def test_each_field_should_have_an_editable_attribute(self):
self.assertTrue(all(f.editable.__class__ == bool for f in self.all_fields))
def test_each_field_should_have_a_has_rel_attribute(self):
self.assertTrue(all(f.is_relation.__class__ == bool for f in self.all_fields))
def test_each_object_should_have_auto_created(self):
self.assertTrue(
all(f.auto_created.__class__ == bool
for f in self.fields_and_reverse_objects)
)
def test_non_concrete_fields(self):
for field in self.fields:
if type(field) in NON_CONCRETE_FIELDS:
self.assertFalse(field.concrete)
else:
self.assertTrue(field.concrete)
def test_non_editable_fields(self):
for field in self.all_fields:
if type(field) in NON_EDITABLE_FIELDS:
self.assertFalse(field.editable)
else:
self.assertTrue(field.editable)
def test_related_fields(self):
for field in self.all_fields:
if type(field) in RELATION_FIELDS:
self.assertTrue(field.is_relation)
else:
self.assertFalse(field.is_relation)
def test_field_names_should_always_be_available(self):
for field in self.fields_and_reverse_objects:
self.assertTrue(field.name)
def test_all_field_types_should_have_flags(self):
for field in self.fields_and_reverse_objects:
for flag in FLAG_PROPERTIES:
self.assertTrue(hasattr(field, flag), "Field %s does not have flag %s" % (field, flag))
if field.is_relation:
true_cardinality_flags = sum(
getattr(field, flag) is True
for flag in FLAG_PROPERTIES_FOR_RELATIONS
)
# If the field has a relation, there should be only one of the
# 4 cardinality flags available.
self.assertEqual(1, true_cardinality_flags)
def test_cardinality_m2m(self):
m2m_type_fields = [
f for f in self.all_fields
if f.is_relation and f.many_to_many
]
# Test classes are what we expect
self.assertEqual(MANY_TO_MANY_CLASSES, {f.__class__ for f in m2m_type_fields})
# Ensure all m2m reverses are m2m
for field in m2m_type_fields:
reverse_field = field.remote_field
self.assertTrue(reverse_field.is_relation)
self.assertTrue(reverse_field.many_to_many)
self.assertTrue(reverse_field.related_model)
def test_cardinality_o2m(self):
o2m_type_fields = [
f for f in self.fields_and_reverse_objects
if f.is_relation and f.one_to_many
]
# Test classes are what we expect
self.assertEqual(ONE_TO_MANY_CLASSES, {f.__class__ for f in o2m_type_fields})
# Ensure all o2m reverses are m2o
for field in o2m_type_fields:
if field.concrete:
reverse_field = field.remote_field
self.assertTrue(reverse_field.is_relation and reverse_field.many_to_one)
def test_cardinality_m2o(self):
m2o_type_fields = [
f for f in self.fields_and_reverse_objects
if f.is_relation and f.many_to_one
]
# Test classes are what we expect
self.assertEqual(MANY_TO_ONE_CLASSES, {f.__class__ for f in m2o_type_fields})
# Ensure all m2o reverses are o2m
for obj in m2o_type_fields:
if hasattr(obj, 'field'):
reverse_field = obj.field
self.assertTrue(reverse_field.is_relation and reverse_field.one_to_many)
def test_cardinality_o2o(self):
o2o_type_fields = [
f for f in self.all_fields
if f.is_relation and f.one_to_one
]
# Test classes are what we expect
self.assertEqual(ONE_TO_ONE_CLASSES, {f.__class__ for f in o2o_type_fields})
# Ensure all o2o reverses are o2o
for obj in o2o_type_fields:
if hasattr(obj, 'field'):
reverse_field = obj.field
self.assertTrue(reverse_field.is_relation and reverse_field.one_to_one)
def test_hidden_flag(self):
incl_hidden = set(AllFieldsModel._meta.get_fields(include_hidden=True))
no_hidden = set(AllFieldsModel._meta.get_fields())
fields_that_should_be_hidden = (incl_hidden - no_hidden)
for f in incl_hidden:
self.assertEqual(f in fields_that_should_be_hidden, f.hidden)
def test_model_and_reverse_model_should_equal_on_relations(self):
for field in AllFieldsModel._meta.get_fields():
is_concrete_forward_field = field.concrete and field.related_model
if is_concrete_forward_field:
reverse_field = field.remote_field
self.assertEqual(field.model, reverse_field.related_model)
self.assertEqual(field.related_model, reverse_field.model)
def test_null(self):
# null isn't well defined for a ManyToManyField, but changing it to
# True causes backwards compatibility problems (#25320).
self.assertFalse(AllFieldsModel._meta.get_field('m2m').null)
self.assertTrue(AllFieldsModel._meta.get_field('reverse2').null)
|
bsd-3-clause
|
todddeluca/python-vagrant
|
vagrant/test.py
|
2
|
3435
|
"""
A TestCase class, tying together the Vagrant class and removing some of the boilerplate involved in writing tests
that leverage vagrant boxes.
"""
from unittest import TestCase
from vagrant import Vagrant, stderr_cm
__author__ = 'nick'
class VagrantTestCase(TestCase):
"""
TestCase class to control vagrant boxes during testing
vagrant_boxes: An iterable of vagrant boxes. If empty or None, all boxes will be used. Defaults to []
vagrant_root: The root directory that holds a Vagrantfile for configuration. Defaults to the working directory
restart_boxes: If True, the boxes will be restored to their initial states between each test, otherwise the boxes
will remain up. Defaults to False
"""
vagrant_boxes = []
vagrant_root = None
restart_boxes = False
__initial_box_statuses = {}
__cleanup_actions = {
Vagrant.NOT_CREATED: 'destroy',
Vagrant.POWEROFF: 'halt',
Vagrant.SAVED: 'suspend',
}
def __init__(self, *args, **kwargs):
"""Check that the vagrant_boxes attribute is not left empty, and is populated by all boxes if left blank"""
self.vagrant = Vagrant(self.vagrant_root, err_cm=stderr_cm)
if not self.vagrant_boxes:
boxes = [s.name for s in self.vagrant.status()]
if len(boxes) == 1:
self.vagrant_boxes = ['default']
else:
self.vagrant_boxes = boxes
super(VagrantTestCase, self).__init__(*args, **kwargs)
def assertBoxStatus(self, box, status):
"""Assertion for a box status"""
box_status = [s.state for s in self.vagrant.status() if s.name == box][0]
if box_status != status:
            raise self.failureException('{} has status {}, not {}'.format(box, box_status, status))
def assertBoxUp(self, box):
"""Assertion for a box being up"""
self.assertBoxStatus(box, Vagrant.RUNNING)
def assertBoxSuspended(self, box):
"""Assertion for a box being up"""
self.assertBoxStatus(box, Vagrant.SAVED)
def assertBoxHalted(self, box):
"""Assertion for a box being up"""
self.assertBoxStatus(box, Vagrant.POWEROFF)
def assertBoxNotCreated(self, box):
"""Assertion for a box being up"""
self.assertBoxStatus(box, Vagrant.NOT_CREATED)
def run(self, result=None):
"""Override run to have provide a hook into an alternative to tearDownClass with a reference to self"""
self.setUpOnce()
run = super(VagrantTestCase, self).run(result)
self.tearDownOnce()
return run
def setUpOnce(self):
"""Collect the box states before starting"""
for box_name in self.vagrant_boxes:
box_state = [s.state for s in self.vagrant.status() if s.name == box_name][0]
self.__initial_box_statuses[box_name] = box_state
def tearDownOnce(self):
"""Restore all boxes to their initial states after running all tests, unless tearDown handled it already"""
if not self.restart_boxes:
self.restore_box_states()
def restore_box_states(self):
"""Restores all boxes to their original states"""
for box_name in self.vagrant_boxes:
action = self.__cleanup_actions.get(self.__initial_box_statuses[box_name])
if action:
getattr(self.vagrant, action)(vm_name=box_name)
def setUp(self):
"""Starts all boxes before running tests"""
for box_name in self.vagrant_boxes:
self.vagrant.up(vm_name=box_name)
super(VagrantTestCase, self).setUp()
def tearDown(self):
"""Returns boxes to their initial status after each test if self.restart_boxes is True"""
if self.restart_boxes:
self.restore_box_states()
super(VagrantTestCase, self).tearDown()
|
mit
|
ricardogsilva/PyWPS
|
tests/test_describe.py
|
1
|
11512
|
import unittest
from collections import namedtuple
from pywps import Process, Service, LiteralInput, ComplexInput, BoundingBoxInput
from pywps import LiteralOutput, ComplexOutput, BoundingBoxOutput
from pywps import E, WPS, OWS, OGCTYPE, Format, NAMESPACES, OGCUNIT
from pywps.inout.literaltypes import LITERAL_DATA_TYPES
from pywps.app.basic import xpath_ns
from pywps.inout.formats import Format
from pywps.inout.literaltypes import AllowedValue
from pywps.validator.allowed_value import ALLOWEDVALUETYPE
from tests.common import client_for
ProcessDescription = namedtuple('ProcessDescription', ['identifier', 'inputs'])
def get_data_type(el):
if el.text in LITERAL_DATA_TYPES:
return el.text
raise RuntimeError("Can't parse data type")
def get_describe_result(resp):
assert resp.status_code == 200
assert resp.headers['Content-Type'] == 'text/xml'
result = []
for desc_el in resp.xpath('/wps:ProcessDescriptions/ProcessDescription'):
[identifier_el] = xpath_ns(desc_el, './ows:Identifier')
inputs = []
for input_el in xpath_ns(desc_el, './DataInputs/Input'):
[input_identifier_el] = xpath_ns(input_el, './ows:Identifier')
input_identifier = input_identifier_el.text
literal_data_el_list = xpath_ns(input_el, './LiteralData')
complex_data_el_list = xpath_ns(input_el, './ComplexData')
if literal_data_el_list:
[literal_data_el] = literal_data_el_list
[data_type_el] = xpath_ns(literal_data_el, './ows:DataType')
data_type = get_data_type(data_type_el)
inputs.append((input_identifier, 'literal', data_type))
elif complex_data_el_list:
[complex_data_el] = complex_data_el_list
formats = []
for format_el in xpath_ns(complex_data_el,
'./Supported/Format'):
[mimetype_el] = xpath_ns(format_el, './ows:MimeType')
formats.append({'mime_type': mimetype_el.text})
inputs.append((input_identifier, 'complex', formats))
else:
raise RuntimeError("Can't parse input description")
result.append(ProcessDescription(identifier_el.text, inputs))
return result
class DescribeProcessTest(unittest.TestCase):
def setUp(self):
def hello(request): pass
def ping(request): pass
processes = [Process(hello, 'hello', 'Process Hello'), Process(ping, 'ping', 'Process Ping')]
self.client = client_for(Service(processes=processes))
def test_get_request_all_args(self):
resp = self.client.get('?Request=DescribeProcess&service=wps&version=1.0.0&identifier=all')
identifiers = [desc.identifier for desc in get_describe_result(resp)]
assert 'ping' in identifiers
assert 'hello' in identifiers
def test_get_request_zero_args(self):
resp = self.client.get('?Request=DescribeProcess&version=1.0.0&service=wps')
assert resp.status_code == 400 # bad request, identifier is missing
def test_get_request_nonexisting_process_args(self):
resp = self.client.get('?Request=DescribeProcess&version=1.0.0&service=wps&identifier=NONEXISTINGPROCESS')
assert resp.status_code == 400
def test_post_request_zero_args(self):
request_doc = WPS.DescribeProcess()
resp = self.client.post_xml(doc=request_doc)
assert resp.status_code == 400
def test_get_one_arg(self):
resp = self.client.get('?service=wps&version=1.0.0&Request=DescribeProcess&identifier=hello')
assert [pr.identifier for pr in get_describe_result(resp)] == ['hello']
def test_post_one_arg(self):
request_doc = WPS.DescribeProcess(
OWS.Identifier('hello'),
version='1.0.0'
)
resp = self.client.post_xml(doc=request_doc)
assert [pr.identifier for pr in get_describe_result(resp)] == ['hello']
def test_get_two_args(self):
resp = self.client.get('?Request=DescribeProcess'
'&service=wps'
'&version=1.0.0'
'&identifier=hello,ping')
result = get_describe_result(resp)
assert [pr.identifier for pr in result] == ['hello', 'ping']
def test_post_two_args(self):
request_doc = WPS.DescribeProcess(
OWS.Identifier('hello'),
OWS.Identifier('ping'),
version='1.0.0'
)
resp = self.client.post_xml(doc=request_doc)
result = get_describe_result(resp)
assert [pr.identifier for pr in result] == ['hello', 'ping']
class DescribeProcessInputTest(unittest.TestCase):
def describe_process(self, process):
client = client_for(Service(processes=[process]))
resp = client.get('?service=wps&version=1.0.0&Request=DescribeProcess&identifier=%s'
% process.identifier)
[result] = get_describe_result(resp)
return result
def test_one_literal_string_input(self):
def hello(request): pass
hello_process = Process(
hello,
'hello',
'Process Hello',
inputs=[LiteralInput('the_name', 'Input name')])
result = self.describe_process(hello_process)
assert result.inputs == [('the_name', 'literal', 'integer')]
def test_one_literal_integer_input(self):
def hello(request): pass
hello_process = Process(hello, 'hello',
'Process Hello',
inputs=[LiteralInput('the_number',
'Input number',
data_type='positiveInteger')])
result = self.describe_process(hello_process)
assert result.inputs == [('the_number', 'literal', 'positiveInteger')]
class InputDescriptionTest(unittest.TestCase):
def test_literal_integer_input(self):
literal = LiteralInput('foo', 'Literal foo', data_type='positiveInteger', uoms=['metre'])
doc = literal.describe_xml()
self.assertEqual(doc.tag, E.Input().tag)
[identifier_el] = xpath_ns(doc, './ows:Identifier')
self.assertEqual(identifier_el.text, 'foo')
[type_el] = xpath_ns(doc, './LiteralData/ows:DataType')
self.assertEqual(type_el.text, 'positiveInteger')
self.assertEqual(type_el.attrib['{%s}reference' % NAMESPACES['ows']],
OGCTYPE['positiveInteger'])
anyvalue = xpath_ns(doc, './LiteralData/ows:AnyValue')
self.assertEqual(len(anyvalue), 1)
def test_literal_allowed_values_input(self):
"""Test all around allowed_values
"""
literal = LiteralInput(
'foo',
'Foo',
data_type='integer',
uoms=['metre'],
allowed_values=(
1, 2, (5, 10), (12, 4, 24),
AllowedValue(
allowed_type=ALLOWEDVALUETYPE.RANGE,
minval=30,
maxval=33,
range_closure='closed-open')
)
)
doc = literal.describe_xml()
allowed_values = xpath_ns(doc, './LiteralData/ows:AllowedValues')
self.assertEqual(len(allowed_values), 1)
allowed_value = allowed_values[0]
values = xpath_ns(allowed_value, './ows:Value')
ranges = xpath_ns(allowed_value, './ows:Range')
self.assertEqual(len(values), 2)
self.assertEqual(len(ranges), 3)
def test_complex_input_identifier(self):
complex_in = ComplexInput('foo', 'Complex foo', supported_formats=[Format('bar/baz')])
doc = complex_in.describe_xml()
self.assertEqual(doc.tag, E.Input().tag)
[identifier_el] = xpath_ns(doc, './ows:Identifier')
self.assertEqual(identifier_el.text, 'foo')
def test_complex_input_default_and_supported(self):
complex_in = ComplexInput(
'foo',
'Complex foo',
supported_formats=[
Format('a/b'),
Format('c/d')
]
)
doc = complex_in.describe_xml()
[default_format] = xpath_ns(doc, './ComplexData/Default/Format')
[default_mime_el] = xpath_ns(default_format, './MimeType')
self.assertEqual(default_mime_el.text, 'a/b')
supported_mime_types = []
for supported_el in xpath_ns(doc, './ComplexData/Supported/Format'):
[mime_el] = xpath_ns(supported_el, './MimeType')
supported_mime_types.append(mime_el.text)
self.assertEqual(supported_mime_types, ['a/b', 'c/d'])
def test_bbox_input(self):
bbox = BoundingBoxInput('bbox', 'BBox foo',
crss=["EPSG:4326", "EPSG:3035"])
doc = bbox.describe_xml()
[inpt] = xpath_ns(doc, '/Input')
[default_crs] = xpath_ns(doc, './BoundingBoxData/Default/CRS')
supported = xpath_ns(doc, './BoundingBoxData/Supported/CRS')
self.assertEqual(inpt.attrib['minOccurs'], '1')
self.assertEqual(default_crs.text, 'EPSG:4326')
self.assertEqual(len(supported), 2)
class OutputDescriptionTest(unittest.TestCase):
def test_literal_output(self):
literal = LiteralOutput('literal', 'Literal foo', uoms=['metre'])
doc = literal.describe_xml()
[output] = xpath_ns(doc, '/Output')
[identifier] = xpath_ns(doc, '/Output/ows:Identifier')
[data_type] = xpath_ns(doc, '/Output/LiteralOutput/ows:DataType')
[uoms] = xpath_ns(doc, '/Output/LiteralOutput/UOMs')
[default_uom] = xpath_ns(uoms, './Default/ows:UOM')
supported_uoms = xpath_ns(uoms, './Supported/ows:UOM')
assert output is not None
assert identifier.text == 'literal'
assert data_type.attrib['{%s}reference' % NAMESPACES['ows']] == OGCTYPE['string']
assert uoms is not None
assert default_uom.text == 'metre'
assert default_uom.attrib['{%s}reference' % NAMESPACES['ows']] == OGCUNIT['metre']
assert len(supported_uoms) == 1
def test_complex_output(self):
complexo = ComplexOutput('complex', 'Complex foo', [Format('GML')])
doc = complexo.describe_xml()
[outpt] = xpath_ns(doc, '/Output')
[default] = xpath_ns(doc, '/Output/ComplexOutput/Default/Format/MimeType')
supported = xpath_ns(doc,
'/Output/ComplexOutput/Supported/Format/MimeType')
assert default.text == 'application/gml+xml'
assert len(supported) == 1
def test_bbox_output(self):
bbox = BoundingBoxOutput('bbox', 'BBox foo',
crss=["EPSG:4326"])
doc = bbox.describe_xml()
[outpt] = xpath_ns(doc, '/Output')
[default_crs] = xpath_ns(doc, './BoundingBoxOutput/Default/CRS')
supported = xpath_ns(doc, './BoundingBoxOutput/Supported/CRS')
assert default_crs.text == 'EPSG:4326'
assert len(supported) == 1
def load_tests(loader=None, tests=None, pattern=None):
if not loader:
loader = unittest.TestLoader()
suite_list = [
loader.loadTestsFromTestCase(DescribeProcessTest),
loader.loadTestsFromTestCase(DescribeProcessInputTest),
loader.loadTestsFromTestCase(InputDescriptionTest),
loader.loadTestsFromTestCase(OutputDescriptionTest),
]
return unittest.TestSuite(suite_list)
|
mit
|
ojii/sandlib
|
lib/lib-python/2.7/test/test_typechecks.py
|
136
|
3166
|
"""Unit tests for __instancecheck__ and __subclasscheck__."""
import unittest
from test import test_support
class ABC(type):
def __instancecheck__(cls, inst):
"""Implement isinstance(inst, cls)."""
return any(cls.__subclasscheck__(c)
for c in set([type(inst), inst.__class__]))
def __subclasscheck__(cls, sub):
"""Implement issubclass(sub, cls)."""
candidates = cls.__dict__.get("__subclass__", set()) | set([cls])
return any(c in candidates for c in sub.mro())
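# Illustrative note: with this metaclass, issubclass(int, Integer) holds
# because int appears in Integer.__subclass__, even though int does not
# actually inherit from Integer.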
class Integer:
__metaclass__ = ABC
__subclass__ = set([int])
class SubInt(Integer):
pass
class TypeChecksTest(unittest.TestCase):
def testIsSubclassInternal(self):
self.assertEqual(Integer.__subclasscheck__(int), True)
self.assertEqual(Integer.__subclasscheck__(float), False)
def testIsSubclassBuiltin(self):
self.assertEqual(issubclass(int, Integer), True)
self.assertEqual(issubclass(int, (Integer,)), True)
self.assertEqual(issubclass(float, Integer), False)
self.assertEqual(issubclass(float, (Integer,)), False)
def testIsInstanceBuiltin(self):
self.assertEqual(isinstance(42, Integer), True)
self.assertEqual(isinstance(42, (Integer,)), True)
self.assertEqual(isinstance(3.14, Integer), False)
self.assertEqual(isinstance(3.14, (Integer,)), False)
def testIsInstanceActual(self):
self.assertEqual(isinstance(Integer(), Integer), True)
self.assertEqual(isinstance(Integer(), (Integer,)), True)
def testIsSubclassActual(self):
self.assertEqual(issubclass(Integer, Integer), True)
self.assertEqual(issubclass(Integer, (Integer,)), True)
def testSubclassBehavior(self):
self.assertEqual(issubclass(SubInt, Integer), True)
self.assertEqual(issubclass(SubInt, (Integer,)), True)
self.assertEqual(issubclass(SubInt, SubInt), True)
self.assertEqual(issubclass(SubInt, (SubInt,)), True)
self.assertEqual(issubclass(Integer, SubInt), False)
self.assertEqual(issubclass(Integer, (SubInt,)), False)
self.assertEqual(issubclass(int, SubInt), False)
self.assertEqual(issubclass(int, (SubInt,)), False)
self.assertEqual(isinstance(SubInt(), Integer), True)
self.assertEqual(isinstance(SubInt(), (Integer,)), True)
self.assertEqual(isinstance(SubInt(), SubInt), True)
self.assertEqual(isinstance(SubInt(), (SubInt,)), True)
self.assertEqual(isinstance(42, SubInt), False)
self.assertEqual(isinstance(42, (SubInt,)), False)
def test_oldstyle(self):
# These should just be ignored.
class X:
def __instancecheck__(self, inst):
return True
def __subclasscheck__(self, cls):
return True
class Sub(X): pass
self.assertNotIsInstance(3, X)
self.assertIsInstance(X(), X)
self.assertFalse(issubclass(int, X))
self.assertTrue(issubclass(Sub, X))
def test_main():
test_support.run_unittest(TypeChecksTest)
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
RaRe-Technologies/gensim
|
gensim/test/svd_error.py
|
3
|
7390
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
"""USAGE: %(program)s MATRIX.mm [CLIP_DOCS] [CLIP_TERMS]
Check truncated SVD error for the algo in gensim, using a given corpus. This script
runs the decomposition with several internal parameters (number of requested factors,
iterative chunk size) and reports error for each parameter combination.
The number of input documents is clipped to the first CLIP_DOCS. Similarly,
only the first CLIP_TERMS are considered (features with id >= CLIP_TERMS are
ignored, effectively restricting the vocabulary size). If you don't specify them,
the entire matrix will be used.
Example: ./svd_error.py ~/gensim/results/wiki_en_v10k.mm.bz2 100000 10000
"""
from __future__ import print_function, with_statement
import logging
import os
import sys
import time
import bz2
import itertools
import numpy as np
import scipy.linalg
import gensim
try:
from sparsesvd import sparsesvd
except ImportError:
# no SVDLIBC: install with `easy_install sparsesvd` if you want SVDLIBC results as well
sparsesvd = None
sparsesvd = None # don't use SVDLIBC
FACTORS = [300] # which num_topics to try
CHUNKSIZE = [10000, 1000] # which chunksize to try
POWER_ITERS = [0, 1, 2, 4, 6] # extra power iterations for the randomized algo
# when reporting reconstruction error, also report spectral norm error? (very slow)
COMPUTE_NORM2 = False
def norm2(a):
"""Spectral norm ("norm 2") of a symmetric matrix `a`."""
if COMPUTE_NORM2:
logging.info("computing spectral norm of a %s matrix", str(a.shape))
return scipy.linalg.eigvalsh(a).max() # much faster than np.linalg.norm(2)
else:
return np.nan
def rmse(diff):
return np.sqrt(1.0 * np.multiply(diff, diff).sum() / diff.size)
def print_error(name, aat, u, s, ideal_nf, ideal_n2):
err = -np.dot(u, np.dot(np.diag(s), u.T))
err += aat
nf, n2 = np.linalg.norm(err), norm2(err)
print(
'%s error: norm_frobenius=%f (/ideal=%g), norm2=%f (/ideal=%g), RMSE=%g' %
(name, nf, nf / ideal_nf, n2, n2 / ideal_n2, rmse(err))
)
sys.stdout.flush()
class ClippedCorpus:
def __init__(self, corpus, max_docs, max_terms):
self.corpus = corpus
self.max_docs, self.max_terms = max_docs, max_terms
def __iter__(self):
for doc in itertools.islice(self.corpus, self.max_docs):
yield [(f, w) for f, w in doc if f < self.max_terms]
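# A ClippedCorpus thus yields at most max_docs documents and silently drops
# features with id >= max_terms, shrinking both matrix dimensions on the fly.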
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.info("running %s", " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
# do we have enough cmd line arguments?
if len(sys.argv) < 2:
print(globals()["__doc__"] % locals())
sys.exit(1)
fname = sys.argv[1]
if fname.endswith('bz2'):
mm = gensim.corpora.MmCorpus(bz2.BZ2File(fname))
else:
mm = gensim.corpora.MmCorpus(fname)
# extra cmd parameters = use a subcorpus (fewer docs, smaller vocab)
if len(sys.argv) > 2:
n = int(sys.argv[2])
else:
n = mm.num_docs
if len(sys.argv) > 3:
m = int(sys.argv[3])
else:
m = mm.num_terms
logging.info("using %i documents and %i features", n, m)
corpus = ClippedCorpus(mm, n, m)
id2word = gensim.utils.FakeDict(m)
logging.info("computing corpus * corpus^T") # eigenvalues of this matrix are singular values of `corpus`, squared
aat = np.zeros((m, m), dtype=np.float64)
for chunk in gensim.utils.grouper(corpus, chunksize=5000):
num_nnz = sum(len(doc) for doc in chunk)
chunk = gensim.matutils.corpus2csc(chunk, num_nnz=num_nnz, num_terms=m, num_docs=len(chunk), dtype=np.float32)
chunk = chunk * chunk.T
chunk = chunk.toarray()
aat += chunk
del chunk
logging.info("computing full decomposition of corpus * corpus^t")
aat = aat.astype(np.float32)
spectrum_s, spectrum_u = scipy.linalg.eigh(aat)
spectrum_s = spectrum_s[::-1] # re-order to descending eigenvalue order
spectrum_u = spectrum_u.T[::-1].T
np.save(fname + '.spectrum.npy', spectrum_s)
for factors in FACTORS:
err = -np.dot(spectrum_u[:, :factors], np.dot(np.diag(spectrum_s[:factors]), spectrum_u[:, :factors].T))
err += aat
ideal_fro = np.linalg.norm(err)
del err
ideal_n2 = spectrum_s[factors + 1]
print('*' * 40, "%i factors, ideal error norm_frobenius=%f, norm_2=%f" % (factors, ideal_fro, ideal_n2))
print("*" * 30, end="")
print_error("baseline", aat,
np.zeros((m, factors)), np.zeros((factors)), ideal_fro, ideal_n2)
if sparsesvd:
logging.info("computing SVDLIBC SVD for %i factors", factors)
taken = time.time()
corpus_ram = gensim.matutils.corpus2csc(corpus, num_terms=m)
ut, s, vt = sparsesvd(corpus_ram, factors)
taken = time.time() - taken
del corpus_ram
del vt
u, s = ut.T.astype(np.float32), s.astype(np.float32)**2 # convert singular values to eigenvalues
del ut
print("SVDLIBC SVD for %i factors took %s s (spectrum %f .. %f)"
% (factors, taken, s[0], s[-1]))
print_error("SVDLIBC", aat, u, s, ideal_fro, ideal_n2)
del u
for power_iters in POWER_ITERS:
for chunksize in CHUNKSIZE:
logging.info(
"computing incremental SVD for %i factors, %i power iterations, chunksize %i",
factors, power_iters, chunksize
)
taken = time.time()
gensim.models.lsimodel.P2_EXTRA_ITERS = power_iters
model = gensim.models.LsiModel(
corpus, id2word=id2word, num_topics=factors,
chunksize=chunksize, power_iters=power_iters
)
taken = time.time() - taken
u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
del model
print(
"incremental SVD for %i factors, %i power iterations, "
"chunksize %i took %s s (spectrum %f .. %f)" %
(factors, power_iters, chunksize, taken, s[0], s[-1])
)
print_error('incremental SVD', aat, u, s, ideal_fro, ideal_n2)
del u
logging.info("computing multipass SVD for %i factors, %i power iterations", factors, power_iters)
taken = time.time()
model = gensim.models.LsiModel(
corpus, id2word=id2word, num_topics=factors, chunksize=2000,
onepass=False, power_iters=power_iters
)
taken = time.time() - taken
u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
del model
print(
"multipass SVD for %i factors, "
"%i power iterations took %s s (spectrum %f .. %f)" %
(factors, power_iters, taken, s[0], s[-1])
)
print_error('multipass SVD', aat, u, s, ideal_fro, ideal_n2)
del u
logging.info("finished running %s", program)
|
lgpl-2.1
|
omaciel/robottelo
|
tests/upgrades/test_remoteexecution.py
|
1
|
11012
|
"""Test for Remote Execution related Upgrade Scenario's
:Requirement: Upgraded Satellite
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: API
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from nailgun import entities
from robottelo.constants import DEFAULT_LOC, DEFAULT_ORG, DISTRO_RHEL7
from robottelo.helpers import add_remote_execution_ssh_key
from robottelo.vm import VirtualMachine
from robottelo.test import APITestCase, settings
from upgrade_tests import post_upgrade, pre_upgrade
from upgrade_tests.helpers.scenarios import create_dict, get_entity_data
class Scenario_remoteexecution_external_capsule(APITestCase):
"""Test Remote Execution job created before migration runs successfully
post migration on a client registered with external capsule.
Test Steps:
1. Before Satellite upgrade:
a. Create Content host.
b. Create a Subnet on Satellite.
c. Install katello-ca on content host.
d. Register content host to Satellite.
e. add_ssh_key of external capsule on content host.
f. run a REX job on content host.
2. Upgrade satellite/capsule.
3. Run a rex Job again with same content host.
4. Check if REX job still getting success.
"""
@classmethod
def setUpClass(cls):
cls.libvirt_vm = settings.compute_resources.libvirt_hostname
cls.default_org_id = entities.Organization().search(
query={'search': 'name="{}"'.format(DEFAULT_ORG)})[0].id
cls.org = entities.Organization(id=cls.default_org_id).read()
cls.bridge = settings.vlan_networking.bridge
cls.subnet = settings.vlan_networking.subnet
cls.gateway = settings.vlan_networking.gateway
cls.netmask = settings.vlan_networking.netmask
cls.vm_domain_name = settings.upgrade.vm_domain
cls.vm_domain = entities.Domain().search(query={'search': 'name="{}"'
.format(cls.vm_domain_name)})
cls.proxy_name = settings.upgrade.rhev_cap_host or settings.upgrade.capsule_hostname
def _vm_cleanup(self, hostname=None):
""" Cleanup the VM from provisioning server
:param str hostname: The content host hostname
"""
if hostname:
vm = VirtualMachine(
hostname=hostname,
target_image=hostname,
provisioning_server=self.libvirt_vm,
distro=DISTRO_RHEL7,
)
vm._created = True
vm.destroy()
@pre_upgrade
def test_pre_scenario_remoteexecution_external_capsule(self):
"""Run REX job on client registered with external capsule
:id: preupgrade-261dd2aa-be01-4c34-b877-54b8ee346561
:steps:
1. Create Subnet.
2. Create Content host.
3. Install katello-ca package and register to Satellite host.
4. add rex ssh_key of external capsule on content host.
5. run the REX job on client vm.
:expectedresults:
1. Content host should be created with the pre-required details.
2. REX job should run on it.
"""
try:
default_loc_id = entities.Location().search(
query={'search': 'name="{}"'.format(DEFAULT_LOC)})[0].id
sn = entities.Subnet(
domain=self.vm_domain,
gateway=self.gateway,
ipam='DHCP',
location=[default_loc_id],
mask=self.netmask,
network=self.subnet,
organization=[self.org.id],
remote_execution_proxy=[entities.SmartProxy(id=2)],
).create()
client = VirtualMachine(
distro=DISTRO_RHEL7,
provisioning_server=self.libvirt_vm,
bridge=self.bridge)
client.create()
client.install_capsule_katello_ca(capsule=self.proxy_name)
client.register_contenthost(org=self.org.label, lce='Library')
add_remote_execution_ssh_key(hostname=client.ip_addr,
proxy_hostname=self.proxy_name)
host = entities.Host().search(
query={'search': 'name="{}"'.format(client.hostname)})
host[0].subnet = sn
host[0].update(['subnet'])
job = entities.JobInvocation().run(data={
'job_template_id': 89, 'inputs': {'command': "ls"},
'targeting_type': 'static_query', 'search_query': "name = {0}"
.format(client.hostname)})
self.assertEqual(job['output']['success_count'], 1)
global_dict = {
self.__class__.__name__: {'client_name': client.hostname}
}
create_dict(global_dict)
except Exception as exp:
if client._created:
self._vm_cleanup(hostname=client.hostname)
raise Exception(exp)
@post_upgrade(depend_on=test_pre_scenario_remoteexecution_external_capsule)
def test_post_scenario_remoteexecution_external_capsule(self):
"""Run a REX job on pre-upgrade created client registered
with external capsule.
:id: postupgrade-00ed2a25-b0bd-446f-a3fc-09149c57fe94
:steps:
1. Run a REX job on content host.
:expectedresults:
1. The job should execute successfully on the pre-upgrade created client.
"""
client_name = get_entity_data(self.__class__.__name__)['client_name']
job = entities.JobInvocation().run(data={
'job_template_id': 89, 'inputs': {'command': "ls"},
'targeting_type': 'static_query', 'search_query': "name = {0}".format(client_name)})
self.assertEqual(job['output']['success_count'], 1)
self._vm_cleanup(hostname=client_name)
class Scenario_remoteexecution_satellite(APITestCase):
"""Test Remote Execution job created before migration runs successfully
post migration on a client registered with Satellite.
Test Steps:
1. Before Satellite upgrade:
2. Create Content host.
3. Create a Subnet on Satellite.
4. Install katello-ca on content host.
5. Register content host to Satellite.
6. Add_ssh_key of Satellite on content host.
7. Run a REX job on content host.
8. Upgrade satellite/capsule.
9. Run a REX job again on the same content host.
10. Check that the REX job still succeeds.
"""
@classmethod
def setUpClass(cls):
cls.libvirt_vm = settings.compute_resources.libvirt_hostname
cls.default_org_id = entities.Organization().search(
query={'search': 'name="{}"'.format(DEFAULT_ORG)})[0].id
cls.org = entities.Organization(id=cls.default_org_id).read()
cls.bridge = settings.vlan_networking.bridge
cls.subnet = settings.vlan_networking.subnet
cls.gateway = settings.vlan_networking.gateway
cls.netmask = settings.vlan_networking.netmask
cls.vm_domain_name = settings.upgrade.vm_domain
cls.vm_domain = entities.Domain().search(query={'search': 'name="{}"'
.format(cls.vm_domain_name)})
cls.proxy_name = settings.server.hostname
def _vm_cleanup(self, hostname=None):
""" Cleanup the VM from provisioning server
:param str hostname: The content host hostname
"""
if hostname:
vm = VirtualMachine(
hostname=hostname,
target_image=hostname,
provisioning_server=self.libvirt_vm,
distro=DISTRO_RHEL7,
)
vm._created = True
vm.destroy()
@pre_upgrade
def test_pre_scenario_remoteexecution_satellite(self):
"""Run REX job on client registered with Satellite
:id: preupgrade-3f338475-fa69-43ef-ac86-f00f4d324b33
:steps:
1. Create Subnet.
2. Create Content host.
3. Install katello-ca package and register to Satellite host.
4. Add rex ssh_key of Satellite on content host.
5. Run the REX job on client vm.
:expectedresults:
1. The content host should be created with the pre-required details.
2. REX job should run on it.
"""
try:
default_loc_id = entities.Location().search(
query={'search': 'name="{}"'.format(DEFAULT_LOC)})[0].id
sn = entities.Subnet(
domain=self.vm_domain,
gateway=self.gateway,
ipam='DHCP',
location=[default_loc_id],
mask=self.netmask,
network=self.subnet,
organization=[self.org.id],
remote_execution_proxy=[entities.SmartProxy(id=1)],
).create()
client = VirtualMachine(
distro=DISTRO_RHEL7,
provisioning_server=self.libvirt_vm,
bridge=self.bridge)
client.create()
client.install_katello_ca()
client.register_contenthost(org=self.org.label, lce='Library')
add_remote_execution_ssh_key(hostname=client.ip_addr)
host = entities.Host().search(
query={'search': 'name="{}"'.format(client.hostname)})
host[0].subnet = sn
host[0].update(['subnet'])
job = entities.JobInvocation().run(data={
'job_template_id': 89, 'inputs': {'command': "ls"},
'targeting_type': 'static_query', 'search_query': "name = {0}"
.format(client.hostname)})
self.assertEqual(job['output']['success_count'], 1)
global_dict = {
self.__class__.__name__: {'client_name': client.hostname}
}
create_dict(global_dict)
except Exception as exp:
if client._created:
self._vm_cleanup(hostname=client.hostname)
raise Exception(exp)
@post_upgrade(depend_on=test_pre_scenario_remoteexecution_satellite)
def test_post_scenario_remoteexecution_satellite(self):
"""Run a REX job on pre-upgrade created client registered
with Satellite.
:id: postupgrade-ad3b1564-d3e6-4ada-9337-3a6ee6863bae
:steps:
1. Run a REX job on content host.
:expectedresults:
1. The job should execute successfully on the pre-upgrade created client.
"""
client_name = get_entity_data(self.__class__.__name__)['client_name']
job = entities.JobInvocation().run(data={
'job_template_id': 89, 'inputs': {'command': "ls"},
'targeting_type': 'static_query', 'search_query': "name = {0}".format(client_name)})
self.assertEqual(job['output']['success_count'], 1)
self._vm_cleanup(hostname=client_name)
|
gpl-3.0
|
Tesora-Release/tesora-trove
|
trove/guestagent/datastore/db2/system.py
|
3
|
2554
|
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
TIMEOUT = 1200
DB2_INSTANCE_OWNER = "db2inst1"
UPDATE_HOSTNAME = (
'source /home/db2inst1/sqllib/db2profile;'
'db2set -g DB2SYSTEM="$(hostname)"')
ENABLE_AUTOSTART = (
"/opt/ibm/db2/V10.5/instance/db2iauto -on " + DB2_INSTANCE_OWNER)
DISABLE_AUTOSTART = (
"/opt/ibm/db2/V10.5/instance/db2iauto -off " + DB2_INSTANCE_OWNER)
START_DB2 = "db2start"
QUIESCE_DB2 = ("db2 QUIESCE INSTANCE DB2INST1 RESTRICTED ACCESS IMMEDIATE "
"FORCE CONNECTIONS")
UNQUIESCE_DB2 = "db2 UNQUIESCE INSTANCE DB2INST1"
STOP_DB2 = "db2 force application all; db2 terminate; db2stop"
DB2_STATUS = ("ps -ef | grep " + DB2_INSTANCE_OWNER + " | grep db2sysc |"
"grep -v grep | wc -l")
CREATE_DB_COMMAND = "db2 create database %(dbname)s"
DELETE_DB_COMMAND = "db2 drop database %(dbname)s"
LIST_DB_COMMAND = (
"db2 list database directory | grep -B6 -i indirect | "
"grep 'Database name' | sed 's/.*= //'")
CREATE_USER_COMMAND = (
'sudo useradd -m -d /home/%(login)s %(login)s;'
'sudo echo %(login)s:%(passwd)s |sudo chpasswd')
GRANT_USER_ACCESS = (
"db2 connect to %(dbname)s; "
"db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS "
"ON DATABASE TO USER %(login)s; db2 connect reset")
DELETE_USER_COMMAND = 'sudo userdel -r %(login)s'
REVOKE_USER_ACCESS = (
"db2 connect to %(dbname)s; "
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS "
"ON DATABASE FROM USER %(login)s; db2 connect reset")
LIST_DB_USERS = (
"db2 +o connect to %(dbname)s; "
"db2 -x select grantee, dataaccessauth from sysibm.sysdbauth; "
"db2 connect reset")
BACKUP_DB = "db2 backup database %(dbname)s to %(dir)s"
RESTORE_DB = (
"db2 restore database %(dbname)s from %(dir)s")
GET_DB_SIZE = (
"db2 connect to %(dbname)s;"
"db2 call get_dbsize_info(?, ?, ?, -1) ")
GET_DB_NAMES = ("find /home/db2inst1/db2inst1/backup/ -type f -name '*.001' |"
" grep -Po \"(?<=backup/)[^.']*(?=\.)\"")
|
apache-2.0
|
mfherbst/spack
|
lib/spack/spack/test/flag_handlers.py
|
2
|
7054
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import pytest
import os
import spack.spec
import spack.repo
import spack.build_environment
@pytest.fixture()
def temp_env():
old_env = os.environ.copy()
yield
os.environ = old_env
def add_o3_to_build_system_cflags(pkg, name, flags):
build_system_flags = []
if name == 'cflags':
build_system_flags.append('-O3')
return (flags, None, build_system_flags)
@pytest.mark.usefixtures('config')
class TestFlagHandlers(object):
def test_no_build_system_flags(self, temp_env):
# Test that both autotools and cmake work getting no build_system flags
s1 = spack.spec.Spec('callpath')
s1.concretize()
pkg1 = spack.repo.get(s1)
spack.build_environment.setup_package(pkg1, False)
s2 = spack.spec.Spec('libelf')
s2.concretize()
pkg2 = spack.repo.get(s2)
spack.build_environment.setup_package(pkg2, False)
# Use cppflags as a canary
assert 'SPACK_CPPFLAGS' not in os.environ
assert 'CPPFLAGS' not in os.environ
def test_unbound_method(self, temp_env):
# Other tests test flag_handlers set as bound methods and functions.
# This tests an unbound method in python2 (no change in python3).
s = spack.spec.Spec('mpileaks cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.__class__.inject_flags
spack.build_environment.setup_package(pkg, False)
assert os.environ['SPACK_CPPFLAGS'] == '-g'
assert 'CPPFLAGS' not in os.environ
def test_inject_flags(self, temp_env):
s = spack.spec.Spec('mpileaks cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.inject_flags
spack.build_environment.setup_package(pkg, False)
assert os.environ['SPACK_CPPFLAGS'] == '-g'
assert 'CPPFLAGS' not in os.environ
def test_env_flags(self, temp_env):
s = spack.spec.Spec('mpileaks cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.env_flags
spack.build_environment.setup_package(pkg, False)
assert os.environ['CPPFLAGS'] == '-g'
assert 'SPACK_CPPFLAGS' not in os.environ
def test_build_system_flags_cmake(self, temp_env):
s = spack.spec.Spec('callpath cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.build_system_flags
spack.build_environment.setup_package(pkg, False)
assert 'SPACK_CPPFLAGS' not in os.environ
assert 'CPPFLAGS' not in os.environ
expected = set(['-DCMAKE_C_FLAGS=-g', '-DCMAKE_CXX_FLAGS=-g',
'-DCMAKE_Fortran_FLAGS=-g'])
assert set(pkg.cmake_flag_args) == expected
def test_build_system_flags_autotools(self, temp_env):
s = spack.spec.Spec('libelf cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.build_system_flags
spack.build_environment.setup_package(pkg, False)
assert 'SPACK_CPPFLAGS' not in os.environ
assert 'CPPFLAGS' not in os.environ
assert 'CPPFLAGS=-g' in pkg.configure_flag_args
def test_build_system_flags_not_implemented(self, temp_env):
s = spack.spec.Spec('mpileaks cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.build_system_flags
# Test that the build_system_flags handler raises a NotImplementedError
with pytest.raises(NotImplementedError):
spack.build_environment.setup_package(pkg, False)
def test_add_build_system_flags_autotools(self, temp_env):
s = spack.spec.Spec('libelf cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = add_o3_to_build_system_cflags
spack.build_environment.setup_package(pkg, False)
assert '-g' in os.environ['SPACK_CPPFLAGS']
assert 'CPPFLAGS' not in os.environ
assert pkg.configure_flag_args == ['CFLAGS=-O3']
def test_add_build_system_flags_cmake(self, temp_env):
s = spack.spec.Spec('callpath cppflags=-g')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = add_o3_to_build_system_cflags
spack.build_environment.setup_package(pkg, False)
assert '-g' in os.environ['SPACK_CPPFLAGS']
assert 'CPPFLAGS' not in os.environ
assert pkg.cmake_flag_args == ['-DCMAKE_C_FLAGS=-O3']
def test_ld_flags_cmake(self, temp_env):
s = spack.spec.Spec('callpath ldflags=-mthreads')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.build_system_flags
spack.build_environment.setup_package(pkg, False)
assert 'SPACK_LDFLAGS' not in os.environ
assert 'LDFLAGS' not in os.environ
expected = set(['-DCMAKE_EXE_LINKER_FLAGS=-mthreads',
'-DCMAKE_MODULE_LINKER_FLAGS=-mthreads',
'-DCMAKE_SHARED_LINKER_FLAGS=-mthreads',
'-DCMAKE_STATIC_LINKER_FLAGS=-mthreads'])
assert set(pkg.cmake_flag_args) == expected
def test_ld_libs_cmake(self, temp_env):
s = spack.spec.Spec('callpath ldlibs=-lfoo')
s.concretize()
pkg = spack.repo.get(s)
pkg.flag_handler = pkg.build_system_flags
spack.build_environment.setup_package(pkg, False)
assert 'SPACK_LDLIBS' not in os.environ
assert 'LDLIBS' not in os.environ
expected = set(['-DCMAKE_C_STANDARD_LIBRARIES=-lfoo',
'-DCMAKE_CXX_STANDARD_LIBRARIES=-lfoo',
'-DCMAKE_Fortran_STANDARD_LIBRARIES=-lfoo'])
assert set(pkg.cmake_flag_args) == expected
|
lgpl-2.1
|
zhangyake/python-itchat
|
test/songCloudImage.py
|
1
|
2058
|
'''
Fetch a NetEase Cloud Music user's listening history by user id
and render a word-cloud image of the songs.
'''
import time
from os import path
from selenium import webdriver
import numpy as np
from bs4 import BeautifulSoup
from PIL import Image
from wordcloud import WordCloud
class SongCloudImage(object):
baseUrl = 'http://music.163.com/#/user/songs/rank?id={}'
def __init__(self, user_id):
self.user_id = user_id
self.__url = SongCloudImage.baseUrl.format(user_id)
def show(self,show=True,save=False,all=True):
driver = webdriver.PhantomJS()
# Open the page
driver.get(self.__url)
time.sleep(2)
# Switch to the target iframe
driver.switch_to.frame("g_iframe")
# Run JS to switch to the user's complete listening history
if all:
driver.execute_script("document.getElementById('songsall').click()")
time.sleep(2)
# Grab the rendered page source
html = driver.page_source
# Parse the page source
soup = BeautifulSoup(html, 'html.parser')
songlists = []
text = ''
for li in soup.find_all('li'):
song_info = li.find(name='span', attrs='txt')
if song_info:
song = {'name': song_info.find('b').get_text(), 'singer': song_info.find_all(attrs='s-fc8')[-1].get_text(), 'score': int(li.find(name='span', attrs='bg').get('style')[6:-2])}
songlists.append((song.get('name') + ' ') * song.get('score'))
d = path.dirname(__file__)
text = ' '.join(songlists)
mask = np.array(Image.open(path.join(d, "heart-mask.jpg")))
wordcloud = WordCloud(font_path=path.join(d, 'STXINGKA.TTF'),mask=mask,random_state=30, min_font_size=8, max_font_size=56, width=900, height=900, background_color=(255, 255, 255)).generate(text)
image = wordcloud.to_image()
if show:
image.show()
if save:
image.save('{}.png'.format(self.user_id))
if __name__ == '__main__':
songCloudImage = SongCloudImage(125090772)
songCloudImage.show(all=True,save=True)
|
mit
|
manassolanki/erpnext
|
erpnext/setup/install.py
|
9
|
2924
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from erpnext.accounts.doctype.cash_flow_mapper.default_cash_flow_mapper import DEFAULT_MAPPERS
from .default_success_action import get_default_success_action
from frappe import _
from frappe.desk.page.setup_wizard.setup_wizard import add_all_roles_to
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
default_mail_footer = """<div style="padding: 7px; text-align: right; color: #888"><small>Sent via
<a style="color: #888" href="http://erpnext.org">ERPNext</a></div>"""
def after_install():
frappe.get_doc({'doctype': "Role", "role_name": "Analytics"}).insert()
set_single_defaults()
create_compact_item_print_custom_field()
create_print_zero_amount_taxes_custom_field()
add_all_roles_to("Administrator")
create_default_cash_flow_mapper_templates()
create_default_success_action()
frappe.db.commit()
def check_setup_wizard_not_completed():
if frappe.db.get_default('desktop:home_page') == 'desktop':
print()
print("ERPNext can only be installed on a fresh site where the setup wizard is not completed")
print("You can reinstall this site (after saving your data) using: bench --site [sitename] reinstall")
print()
return False
def set_single_defaults():
for dt in ('Accounts Settings', 'Print Settings', 'HR Settings', 'Buying Settings',
'Selling Settings', 'Stock Settings'):
default_values = frappe.db.sql("""select fieldname, `default` from `tabDocField`
where parent=%s""", dt)
if default_values:
try:
b = frappe.get_doc(dt, dt)
for fieldname, value in default_values:
b.set(fieldname, value)
b.save()
except frappe.MandatoryError:
pass
except frappe.ValidationError:
pass
frappe.db.set_default("date_format", "dd-mm-yyyy")
def create_compact_item_print_custom_field():
create_custom_field('Print Settings', {
'label': _('Compact Item Print'),
'fieldname': 'compact_item_print',
'fieldtype': 'Check',
'default': 1,
'insert_after': 'with_letterhead'
})
def create_print_zero_amount_taxes_custom_field():
create_custom_field('Print Settings', {
'label': _('Print taxes with zero amount'),
'fieldname': 'print_taxes_with_zero_amount',
'fieldtype': 'Check',
'default': 0,
'insert_after': 'allow_print_for_cancelled'
})
def create_default_cash_flow_mapper_templates():
for mapper in DEFAULT_MAPPERS:
if not frappe.db.exists('Cash Flow Mapper', mapper['section_name']):
doc = frappe.get_doc(mapper)
doc.insert(ignore_permissions=True)
def create_default_success_action():
for success_action in get_default_success_action():
if not frappe.db.exists('Success Action', success_action.get("ref_doctype")):
doc = frappe.get_doc(success_action)
doc.insert(ignore_permissions=True)
|
gpl-3.0
|
Paulius-Maruska/python-isign
|
src/isign/connection.py
|
1
|
1919
|
from typing import Dict, Union
import requests
from .environment import (
get_default_environment,
ISignEnvironment,
)
from .error import ISignError
class ISignConnection:
def __init__(self,
access_token: str,
user_agent: str = "Python iSign",
environment: Union[str, ISignEnvironment] = "sandbox"
) -> None:
self.access_token = access_token
self.user_agent = user_agent
if isinstance(environment, str):
self.environment = get_default_environment(environment)
elif isinstance(environment, ISignEnvironment):
self.environment = environment
else:
raise ValueError("environment must be either str name or an instance of ISignEnvironment")
def __repr__(self) -> str:
return (f"ISignConnection("
f"access_token={self.access_token!r}, "
f"user_agent={self.user_agent!r}, "
f"environment={self.environment!r})")
def __str__(self) -> str:
return f"< isign conn for {self.environment} >"
def get(self, path: str) -> Dict:
url = self.environment.construct_url(self.access_token, path)
hdr = {"User-Agent": self.user_agent}
response = requests.get(url, headers=hdr)
if response.status_code >= 400:
raise ISignError("GET", path, response.status_code, response.json())
result: Dict = response.json()
return result
def post(self, path: str, content: Dict) -> Dict:
url = self.environment.construct_url(self.access_token, path)
hdr = {"User-Agent": self.user_agent}
response = requests.post(url, headers=hdr, json=content)
if response.status_code >= 400:
raise ISignError("POST", path, response.status_code, response.json())
result: Dict = response.json()
return result
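# Usage sketch (the token and path below are illustrative placeholders, not
# documented endpoints of this library):
# conn = ISignConnection(access_token="MY-TOKEN", environment="sandbox")
# info = conn.get("/some/resource.json")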
|
mit
|
vhumpa/dogtail
|
dogtail/wrapped.py
|
2
|
1083
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import Accessibility
"""
Superclasses for application wrappers
Subclass these classes if you want to create application wrappers, e.g.:
http://svn.gnome.org/viewvc/dogtail-tests/trunk/appwrappers/dogtail/appwrappers/gedit.py?view=markup
"""
__author__ = "Zack Cerza <[email protected]>"
def makeWrapperClass(wrappedClass, name): # pragma: no cover
class klass(object):
def __init__(self, obj):
self.obj = obj
def __getattr__(self, name):
if name == 'obj':
return self.__dict__['obj']
return getattr(self.obj, name)
def __setattr__(self, name, value):
if name == 'obj':
self.__dict__['obj'] = value
else:
return setattr(self.obj, name, value)
klass.__name__ = name
return klass
Application = makeWrapperClass(Accessibility.Application, "WrappedApplication")
Node = makeWrapperClass(Accessibility.Accessible, "WrappedNode")
|
gpl-2.0
|
USCDataScience/polar.usc.edu
|
html/team3-a3/app.py
|
1
|
3694
|
from flask import Flask, jsonify, send_from_directory, redirect, request
import requests
import os
url = 'http://54.183.25.174:9200/polar.usc.edu/_search?q=*:*&size=10000&fields='
#res = requests.get()
#result = res.json()['hits']['hits']
#print len(result)
data = []
with open('datasize.json', 'rb') as f:
data = eval(f.read())
app = Flask(__name__, static_url_path='')
@app.route('/app/<path:path>', methods=['GET'])
def static_proxy(path):
return send_from_directory('app', path)
@app.route('/app/', methods=['GET'])
def static_index():
return send_from_directory('app', 'index.html')
@app.route('/api/sweets')
def send_keywords():
return jsonify({"data": get_keywords()})
@app.route('/api/sizeratio')
def send_sizeratio():
return jsonify({"data": get_sizeratio()})
@app.route('/api/request')
def send_request():
input = request.args.get('input')
requesturls = []
for row in data:
try:
if input in row['taxonomy']['label']:
requesturls.append(row['requestURL'])
except:
continue
print requesturls
return jsonify({"data": requesturls})
def get_sizeratio():
ratio = []
res = requests.get(url+'id')
result = res.json()['hits']['hits']
for row in result:
try:
resin = requests.get('http://54.183.25.174:9200/polar.usc.edu/metadata/'
+ row['_id'])
resultin = resin.json()['_source']
with open('temp.txt', 'wb') as f:
f.write(str(resultin))
indexsize = os.path.getsize('temp.txt')
bodysize = resultin['bodyFileSize']
tempratio = float(indexsize/(bodysize*1.0))
ratio.append(tempratio)
print tempratio
except:
continue
return ratio
@app.route('/api/mimetypes')
def send_mimetypes():
return jsonify({"data": get_mimetypes()})
def get_mimetypes():
dict_count_mime = {}
dict_count_mime_tika = {}
res = requests.get(url+'tikaMetaData.Content-Type,cbormime')
result = res.json()['hits']['hits']
for row in result:
if 'fields' in row:
for mime in row['fields']['tikaMetaData.Content-Type']:
temp = mime.split(';')[0]
if temp in dict_count_mime_tika:
count = dict_count_mime_tika[temp]
dict_count_mime_tika[temp] = count+1
else:
dict_count_mime_tika[temp] = 1
for mime in row['fields']['cbormime']:
temp = mime.split(';')[0]
if temp in dict_count_mime:
count = dict_count_mime[temp]
dict_count_mime[temp] = count+1
else:
dict_count_mime[temp] = 1
final = []
final.append(dict_count_mime)
final.append(dict_count_mime_tika)
return final
def get_keywords():
dict_count_owl = {}
res = requests.get(url+'owlconcepts')
result = res.json()['hits']['hits']
for row in result:
if 'fields' in row:
for concept in row['fields']['owlconcepts']:
if concept in dict_count_owl:
count = dict_count_owl[concept]
dict_count_owl[concept] = count+1
else:
dict_count_owl[concept] = 1
frequency_list_for_d3 = []
for key in dict_count_owl:
temp = {}
temp['text'] = key
temp['size'] = dict_count_owl[key]
frequency_list_for_d3.append(temp)
return frequency_list_for_d3
if __name__ == '__main__':
app.run(debug=True)
|
apache-2.0
|
marckuz/django
|
django/contrib/auth/urls.py
|
568
|
1036
|
# The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import url
from django.contrib.auth import views
urlpatterns = [
url(r'^login/$', views.login, name='login'),
url(r'^logout/$', views.logout, name='logout'),
url(r'^password_change/$', views.password_change, name='password_change'),
url(r'^password_change/done/$', views.password_change_done, name='password_change_done'),
url(r'^password_reset/$', views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', views.password_reset_complete, name='password_reset_complete'),
]
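# Typical hookup in a project URLconf (standard Django convention):
# url(r'^accounts/', include('django.contrib.auth.urls')),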
|
bsd-3-clause
|
hoffmabc/OpenBazaar-Server
|
db/tests/test_datastore.py
|
2
|
3513
|
import unittest
import os
from db import datastore
from protos.objects import Profile, Listings
from protos.countries import CountryCode
class DatastoreTest(unittest.TestCase):
def setUp(self):
datastore.create_database("test.db")
datastore.DATABASE = "test.db"
self.test_hash = "87e0555568bf5c7e4debd6645fc3f41e88df6ca8"
self.test_hash2 = "97e0555568bf5c7e4debd6645fc3f41e88df6ca8"
self.test_file = "Contents of test.txt"
self.test_file2 = "Contents of test2.txt"
self.sp = Profile()
self.sp.name = "Test User"
self.sp.encryption_key = "Key"
self.sp.location = CountryCode.Value('UNITED_STATES')
self.serialized_listings = Listings()
self.lm = self.serialized_listings.ListingMetadata()
self.lm.contract_hash = self.test_hash
self.lm.title = "TEST CONTRACT TITLE"
self.lm.price = 0
self.lm.currency_code = "USD"
self.lm.nsfw = False
self.lm.origin = CountryCode.Value('ALL')
self.hm = datastore.HashMap()
self.hm.delete_all()
self.ps = datastore.ProfileStore()
self.ls = datastore.ListingsStore()
self.ks = datastore.KeyStore()
def tearDown(self):
os.remove("test.db")
def test_hashmapInsert(self):
self.hm.insert(self.test_hash, self.test_file)
f = self.hm.get_file(self.test_hash)
self.assertEqual(f, self.test_file)
def test_hashmapGetEmpty(self):
f = self.hm.get_file('87e0555568bf5c7e4debd6645fc3f41e88df6ca9')
self.assertEqual(f, None)
def test_hashmapGetAll(self):
# Get All from empty datastore
self.hm.delete_all()
f = self.hm.get_all()
self.assertEqual(0, len(f))
# Get All from populated datastore
self.hm.insert(self.test_hash, self.test_file)
self.hm.insert(self.test_hash2, self.test_file2)
f = self.hm.get_all()
self.assertIn((self.test_hash, self.test_file), f)
self.assertIn((self.test_hash2, self.test_file2), f)
def test_setProto(self):
self.ps.set_proto(self.sp.SerializeToString())
sp = self.ps.get_proto()
val = Profile()
val.ParseFromString(sp)
self.assertEqual(self.sp, val)
def test_addListing(self):
self.ls.delete_all_listings()
self.ls.add_listing(self.lm)
l = self.ls.get_proto()
val = Listings()
val.ParseFromString(l)
self.assertEqual(self.lm, val.listing[0])
def test_deleteListing(self):
self.ls.delete_all_listings()
self.ls.add_listing(self.lm)
self.ls.delete_listing(self.test_hash)
l = self.ls.get_proto()
val = Listings()
val.ParseFromString(l)
self.assertEqual(0, len(val.listing))
# Try to delete when table is already empty
self.ls.delete_all_listings()
self.assertEqual(None, self.ls.delete_listing(self.test_hash))
def test_setGUIDKey(self):
self.ks.set_key("guid", "privkey", "signed_privkey")
key = self.ks.get_key("guid")
self.assertEqual(("privkey", "signed_privkey"), key)
def test_setBitcoinKey(self):
self.ks.set_key("bitcoin", "privkey", "signed_privkey")
key = self.ks.get_key("bitcoin")
self.assertEqual(("privkey", "signed_privkey"), key)
def test_getKeyFromEmptyTable(self):
self.ks.delete_all_keys()
self.assertEqual(None, self.ks.get_key("guid"))
|
mit
|
ohio813/androguard
|
androguard/decompiler/dad/dataflow.py
|
34
|
20280
|
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from androguard.decompiler.dad.instruction import (Variable, ThisParam,
Param)
from androguard.decompiler.dad.util import build_path, common_dom
from androguard.decompiler.dad.node import Node
logger = logging.getLogger('dad.control_flow')
class BasicReachDef(object):
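"""Iterative reaching-definitions analysis over the CFG.
Per-node sets, as inferred from run() below:
R[node]: definitions reaching the entry of the node;
A[node]: definitions available at the exit of the node;
DB[node]: definitions born in the node (last def of each register).
"""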
def __init__(self, graph, params):
self.g = graph
self.A = defaultdict(set)
self.R = defaultdict(set)
self.DB = defaultdict(set)
self.defs = defaultdict(lambda: defaultdict(set))
self.def_to_loc = defaultdict(set)
# Deal with special entry node
entry = graph.entry
self.A[entry] = range(-1, -len(params) - 1, -1)
for loc, param in enumerate(params, 1):
self.defs[entry][param].add(-loc)
self.def_to_loc[param].add(-loc)
# Deal with the other nodes
for node in graph.rpo:
for i, ins in node.get_loc_with_ins():
kill = ins.get_lhs()
if kill is not None:
self.defs[node][kill].add(i)
self.def_to_loc[kill].add(i)
for defs, values in self.defs[node].items():
self.DB[node].add(max(values))
def run(self):
nodes = self.g.rpo[:]
while nodes:
node = nodes.pop(0)
newR = set()
for pred in self.g.all_preds(node):
newR.update(self.A[pred])
if newR and newR != self.R[node]:
self.R[node] = newR
for suc in self.g.all_sucs(node):
if suc not in nodes:
nodes.append(suc)
killed_locs = set()
for reg in self.defs[node]:
killed_locs.update(self.def_to_loc[reg])
A = set()
for loc in self.R[node]:
if loc not in killed_locs:
A.add(loc)
newA = A.union(self.DB[node])
if newA != self.A[node]:
self.A[node] = newA
for suc in self.g.all_sucs(node):
if suc not in nodes:
nodes.append(suc)
def update_chain(graph, loc, du, ud):
'''
Updates the DU chain of the instruction located at loc such that there is
no more reference to it so that we can remove it.
When an instruction is found to be dead (i.e. it has no side effect, and the
register defined is not used) we have to update the DU chain of all the
variables that may be used by the dead instruction.
'''
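# Illustrative sketch: if the dead instruction at `loc` is `x = y + z`, each
# definition of y and z drops `loc` from its DU chain; a definition whose DU
# chain empties (and has no side effect) becomes dead in turn and is removed
# recursively.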
ins = graph.get_ins_from_loc(loc)
for var in ins.get_used_vars():
# We get the definition points of the current variable
for def_loc in set(ud[(var, loc)]):
# We remove the use of the variable at loc from the DU chain of
# the variable definition located at def_loc
du[(var, def_loc)].remove(loc)
ud[(var, loc)].remove(def_loc)
if not ud.get((var, loc)):
ud.pop((var, loc))
# If the DU chain of the defined variable is now empty, this means
# that we may have created a new dead instruction, so we check that
# the instruction has no side effect and we update the DU chain of
# the new dead instruction, and we delete it.
# We also make sure that def_loc is not < 0. This is the case when
# the current variable is a method parameter.
if def_loc >= 0 and not du[(var, def_loc)]:
du.pop((var, def_loc))
def_ins = graph.get_ins_from_loc(def_loc)
if def_ins.is_call():
def_ins.remove_defined_var()
elif def_ins.has_side_effect():
continue
else:
update_chain(graph, def_loc, du, ud)
graph.remove_ins(def_loc)
def dead_code_elimination(graph, du, ud):
'''
Run a dead code elimination pass.
Instructions are checked to be dead. If it is the case, we remove them and
we update the DU & UD chains of its variables to check for further dead
instructions.
'''
for node in graph.rpo:
for i, ins in node.get_loc_with_ins()[:]:
reg = ins.get_lhs()
if reg is not None:
# If the definition is not used, we check that the instruction
# has no side effect. If there is one and this is a call, we
# remove only the unused defined variable. else, this is
# something like an array access, so we do nothing.
# Otherwise (no side effect) we can remove the instruction from
# the node.
if (reg, i) not in du:
if ins.is_call():
ins.remove_defined_var()
elif ins.has_side_effect():
continue
else:
# We can delete the instruction. First update the DU
# chain of the variables used by the instruction to
# `let them know` that they are not used anymore by the
# deleted instruction.
# Then remove the instruction.
update_chain(graph, i, du, ud)
graph.remove_ins(i)
def clear_path_node(graph, reg, loc1, loc2):
for loc in xrange(loc1, loc2):
ins = graph.get_ins_from_loc(loc)
logger.debug(' treat loc: %d, ins: %s', loc, ins)
if ins is None:
continue
logger.debug(' LHS: %s, side_effect: %s', ins.get_lhs(),
ins.has_side_effect())
if ins.get_lhs() == reg or ins.has_side_effect():
return False
return True
def clear_path(graph, reg, loc1, loc2):
'''
Check that the path from loc1 to loc2 is clear.
We have to check that there is no side effect between the two location
points. We also have to check that the variable `reg` is not redefined
along one of the possible paths from loc1 to loc2.
'''
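# Example (hypothetical locations): propagating a definition of v1 made at
# loc 3 into a use at loc 7 is unsafe if any instruction on a path from 3 to
# 7 redefines v1 or has a side effect (e.g. a method call).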
logger.debug('clear_path: reg(%s), loc1(%s), loc2(%s)', reg, loc1, loc2)
node1 = graph.get_node_from_loc(loc1)
node2 = graph.get_node_from_loc(loc2)
# If both instructions are in the same node, we only have to check that the
# path is clear inside the node
if node1 is node2:
return clear_path_node(graph, reg, loc1 + 1, loc2)
# If instructions are in different nodes, we also have to check the nodes
# in the path between the two locations.
if not clear_path_node(graph, reg, loc1 + 1, node1.ins_range[1]):
return False
path = build_path(graph, node1, node2)
for node in path:
locs = node.ins_range
end_loc = loc2 if (locs[0] <= loc2 <= locs[1]) else locs[1]
if not clear_path_node(graph, reg, locs[0], end_loc):
return False
return True
def register_propagation(graph, du, ud):
'''
Propagate the temporary registers between instructions and remove them if
necessary.
We process the nodes of the graph in reverse post order. For each
instruction in the node, we look at the variables that it uses. For each of
these variables we look where it is defined and if we can replace it with
its definition.
We have to be careful to the side effects some instructions may have.
To do the propagation, we use the computed DU and UD chains.
'''
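# Sketch of the intended effect (hypothetical registers): the pair
# v0 = b.field; v1 = v0 + 1
# can be folded into v1 = b.field + 1 when v0 is defined once, used once,
# and the path between the two locations is clear.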
change = True
while change:
change = False
for node in graph.rpo:
for i, ins in node.get_loc_with_ins()[:]:
logger.debug('Treating instruction %d: %s', i, ins)
# We make sure the ins has not been deleted since the start of
# the iteration
if ins not in node.get_ins():
logger.debug(' => skip instruction (deleted)')
continue
logger.debug(' Used vars: %s', ins.get_used_vars())
for var in ins.get_used_vars():
# Get the list of locations this variable is defined at.
locs = ud[(var, i)]
logger.debug(' var %s defined in lines %s', var, locs)
# If the variable is uniquely defined for this instruction
# it may be eligible for propagation.
if len(locs) != 1:
continue
loc = locs[0]
# Methods parameters are defined with a location < 0.
if loc < 0:
continue
orig_ins = graph.get_ins_from_loc(loc)
logger.debug(' -> %s', orig_ins)
logger.debug(' -> DU(%s, %s) = %s', var, loc,
du[(var, loc)])
# We defined some instructions as not propagable.
# Actually this is the case only for array creation
# (new foo[x])
if not orig_ins.is_propagable():
logger.debug(' %s not propagable...', orig_ins)
continue
if not orig_ins.get_rhs().is_const():
# We only try to propagate constants and definition
# points which are used at only one location.
if len(du[(var, loc)]) > 1:
logger.debug(' => variable has multiple uses'
' and is not const => skip')
continue
# We check that the propagation is safe for all the
# variables that are used in the instruction.
# The propagation is not safe if there is a side effect
# along the path from the definition of the variable
# to its use in the instruction, or if the variable may
be redefined along this path.
safe = True
orig_ins_used_vars = orig_ins.get_used_vars()
logger.debug(' variables used by the original '
'instruction: %s', orig_ins_used_vars)
for var2 in orig_ins_used_vars:
# loc is the location of the defined variable
# i is the location of the current instruction
if not clear_path(graph, var2, loc, i):
safe = False
break
if not safe:
logger.debug('Propagation NOT SAFE')
continue
# We also check that the instruction itself is
# propagable. If the instruction has a side effect it
# cannot be propagated if there is another side effect
# along the path
if orig_ins.has_side_effect():
if not clear_path(graph, None, loc, i):
logger.debug(' %s has side effect and the '
'path is not clear !', orig_ins)
continue
logger.debug(' => Modification of the instruction!')
logger.debug(' - BEFORE: %s', ins)
ins.replace(var, orig_ins.get_rhs())
logger.debug(' -> AFTER: %s', ins)
logger.debug('\t UD(%s, %s) : %s', var, i, ud[(var, i)])
ud[(var, i)].remove(loc)
logger.debug('\t -> %s', ud[(var, i)])
if len(ud[(var, i)]) == 0:
ud.pop((var, i))
for var2 in orig_ins.get_used_vars():
# We update the UD chain of the variables we
# propagate. We also have to take the
# definition points of all the variables used
# by the instruction and update the DU chain
# with this information.
old_ud = ud.get((var2, loc))
logger.debug('\t ud(%s, %s) = %s', var2, loc, old_ud)
# If the instruction uses the same variable
# multiple times, the UD chain is None on the
# second and subsequent passes because it was
# already handled.
if old_ud is None:
continue
ud[(var2, i)].extend(old_ud)
logger.debug('\t - ud(%s, %s) = %s', var2, i,
ud[(var2, i)])
ud.pop((var2, loc))
for def_loc in old_ud:
du[(var2, def_loc)].remove(loc)
du[(var2, def_loc)].append(i)
new_du = du[(var, loc)]
logger.debug('\t new_du(%s, %s): %s', var, loc, new_du)
new_du.remove(i)
logger.debug('\t -> %s', new_du)
if not new_du:
logger.debug('\t REMOVING INS %d', loc)
du.pop((var, loc))
graph.remove_ins(loc)
change = True
class DummyNode(Node):
def __init__(self, name):
super(DummyNode, self).__init__(name)
def get_loc_with_ins(self):
return []
def __repr__(self):
return '%s-dumnode' % self.name
def __str__(self):
return '%s-dummynode' % self.name
def split_variables(graph, lvars, DU, UD):
treated = defaultdict(list)
variables = defaultdict(list)
for var, loc in sorted(DU):
if var not in lvars:
continue
if loc in treated[var]:
continue
defs = [loc]
uses = set(DU[(var, loc)])
change = True
while change:
change = False
for use in uses:
ldefs = UD[(var, use)]
for ldef in ldefs:
if ldef not in defs:
defs.append(ldef)
change = True
for ldef in defs[1:]:
luses = set(DU[(var, ldef)])
for use in luses:
if use not in uses:
uses.add(use)
change = True
treated[var].extend(defs)
variables[var].append((defs, list(uses)))
if lvars:
nb_vars = max(lvars) + 1
else:
nb_vars = 0
for var, versions in variables.iteritems():
nversions = len(versions)
if nversions == 1:
continue
orig_var = lvars.pop(var)
for i, (defs, uses) in enumerate(versions):
if min(defs) < 0: # Param
if orig_var.this:
new_version = ThisParam(var, orig_var.type)
else:
new_version = Param(var, orig_var.type)
lvars[var] = new_version
else:
new_version = Variable(nb_vars)
new_version.type = orig_var.type
lvars[nb_vars] = new_version # add new version to variables
nb_vars += 1
new_version.name = '%d_%d' % (var, i)
for loc in defs:
if loc < 0:
continue
ins = graph.get_ins_from_loc(loc)
ins.replace_lhs(new_version)
DU[(new_version.value(), loc)] = DU.pop((var, loc))
for loc in uses:
ins = graph.get_ins_from_loc(loc)
ins.replace_var(var, new_version)
UD[(new_version.value(), loc)] = UD.pop((var, loc))
def build_def_use(graph, lparams):
'''
Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the
method.
'''
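# Resulting chain shapes: UD[(var, use_loc)] -> [def_locs] and the inverse
# DU[(var, def_loc)] -> [use_locs].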
# We insert two special nodes : entry & exit, to the graph.
# This is done to simplify the reaching definition analysis.
old_entry = graph.entry
old_exit = graph.exit
new_entry = DummyNode('entry')
graph.add_node(new_entry)
graph.add_edge(new_entry, old_entry)
graph.entry = new_entry
if old_exit:
new_exit = DummyNode('exit')
graph.add_node(new_exit)
graph.add_edge(old_exit, new_exit)
graph.rpo.append(new_exit)
analysis = BasicReachDef(graph, set(lparams))
analysis.run()
# The analysis is done, We can now remove the two special nodes.
graph.remove_node(new_entry)
if old_exit:
graph.remove_node(new_exit)
graph.entry = old_entry
UD = defaultdict(list)
for node in graph.rpo:
for i, ins in node.get_loc_with_ins():
for var in ins.get_used_vars():
# var not in analysis.def_to_loc: test that the register
# exists. It is possible that it is not the case, when a
# variable is of a type which is stored on multiple registers
# e.g: a 'double' stored in v3 is also present in v4, so a call
# to foo(v3), will in fact call foo(v3, v4).
if var not in analysis.def_to_loc:
continue
ldefs = analysis.defs[node]
prior_def = -1
for v in ldefs.get(var, set()):
if prior_def < v < i:
prior_def = v
if prior_def >= 0:
UD[(var, i)].append(prior_def)
else:
intersect = analysis.def_to_loc[var].intersection(
analysis.R[node])
UD[(var, i)].extend(intersect)
DU = defaultdict(list)
for var_loc, defs_loc in UD.items():
var, loc = var_loc
for def_loc in defs_loc:
DU[(var, def_loc)].append(loc)
return UD, DU
def place_declarations(graph, dvars, du, ud):
idom = graph.immediate_dominators()
for node in graph.rpo:
for loc, ins in node.get_loc_with_ins():
for var in ins.get_used_vars():
if (not isinstance(dvars[var], Variable)
or isinstance(dvars[var], Param)):
continue
var_defs_locs = ud[(var, loc)]
def_nodes = set()
for def_loc in var_defs_locs:
def_node = graph.get_node_from_loc(def_loc)
# TODO: place declarations in catch if needed
if def_node.in_catch:
continue
def_nodes.add(def_node)
if not def_nodes:
continue
common_dominator = def_nodes.pop()
for def_node in def_nodes:
common_dominator = common_dom(
idom, common_dominator, def_node)
if any(var in range(*common_dominator.ins_range)
for var in ud[(var, loc)]):
continue
common_dominator.add_variable_declaration(dvars[var])
|
apache-2.0
|
ryanbay/node-gyp
|
gyp/pylib/gyp/easy_xml_test.py
|
2698
|
3270
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '&lt;test&gt;\'"&#xD;&amp;&#xA;foo'
converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
|
mit
|
shminer/kernel-msm-3.18
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
|
1891
|
3300
|
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
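# Hedged illustration (not in the original file): autodict() nests to any
# depth without KeyError, which is what lets the helpers below assign
# flag_fields[event][field]['values'][value] directly.
def _autodict_example():
    d = autodict()
    d['sched_switch']['prev_state']['values'][1] = 'S'  # no intermediate setup
    return d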
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
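# Hedged usage sketch (event and field names below are hypothetical, not
# from the original file): after registering a delimiter and two flag
# values, flag_str() renders the bits set in a value.
def _flag_str_example():
    define_flag_field('irq_handler', 'flags', '|')
    define_flag_value('irq_handler', 'flags', 0x1, 'PENDING')
    define_flag_value('irq_handler', 'flags', 0x2, 'DISABLED')
    return flag_str('irq_handler', 'flags', 0x3)  # -> "PENDING | DISABLED"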
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
self.callchain = common_callchain
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
|
gpl-2.0
|
eHealthAfrica/rapidpro
|
temba/utils/management/commands/test_db.py
|
1
|
30746
|
from __future__ import unicode_literals, division, print_function
import json
import math
import pytz
import random
import resource
import six
import sys
import time
import uuid
from collections import defaultdict
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management import BaseCommand, CommandError
from django.core.management.base import CommandParser
from django.db import connection, transaction
from django.utils import timezone
from django_redis import get_redis_connection
from subprocess import check_call, CalledProcessError
from temba.channels.models import Channel
from temba.channels.tasks import squash_channelcounts
from temba.contacts.models import Contact, ContactField, ContactGroup, ContactURN, ContactGroupCount, URN, TEL_SCHEME, TWITTER_SCHEME
from temba.flows.models import FlowStart, FlowRun
from temba.flows.tasks import squash_flowpathcounts, squash_flowruncounts, prune_recentmessages
from temba.locations.models import AdminBoundary
from temba.msgs.models import Label, Msg
from temba.msgs.tasks import squash_labelcounts
from temba.orgs.models import Org
from temba.orgs.tasks import squash_topupcredits
from temba.utils import chunk_list, ms_to_datetime, datetime_to_str, datetime_to_ms
from temba.values.models import Value
# maximum age in days of database content
CONTENT_AGE = 3 * 365
# every user will have this password including the superuser
USER_PASSWORD = "Qwerty123"
# database dump containing admin boundary records
LOCATIONS_DUMP = 'test-data/nigeria.bin'
# organization names are generated from these components
ORG_NAMES = (
("UNICEF", "WHO", "WFP", "UNESCO", "UNHCR", "UNITAR", "FAO", "UNEP", "UNAIDS", "UNDAF"),
("Nigeria", "Chile", "Indonesia", "Rwanda", "Mexico", "Zambia", "India", "Brazil", "Sudan", "Mozambique")
)
# the users, channels, groups, labels and fields to create for each organization
USERS = (
{'username': "admin%d", 'email': "org%[email protected]", 'role': 'administrators'},
{'username': "editor%d", 'email': "org%[email protected]", 'role': 'editors'},
{'username': "viewer%d", 'email': "org%[email protected]", 'role': 'viewers'},
{'username': "surveyor%d", 'email': "org%[email protected]", 'role': 'surveyors'},
)
CHANNELS = (
{'name': "Android", 'channel_type': Channel.TYPE_ANDROID, 'scheme': 'tel', 'address': "1234"},
{'name': "Nexmo", 'channel_type': Channel.TYPE_NEXMO, 'scheme': 'tel', 'address': "2345"},
{'name': "Twitter", 'channel_type': 'TT', 'scheme': 'twitter', 'address': "my_handle"},
)
FIELDS = (
{'key': 'gender', 'label': "Gender", 'value_type': Value.TYPE_TEXT},
{'key': 'age', 'label': "Age", 'value_type': Value.TYPE_DECIMAL},
{'key': 'joined', 'label': "Joined On", 'value_type': Value.TYPE_DATETIME},
{'key': 'ward', 'label': "Ward", 'value_type': Value.TYPE_WARD},
{'key': 'district', 'label': "District", 'value_type': Value.TYPE_DISTRICT},
{'key': 'state', 'label': "State", 'value_type': Value.TYPE_STATE},
)
GROUPS = (
{'name': "Reporters", 'query': None, 'member': 0.95}, # member is either a probability or callable
{'name': "Farmers", 'query': None, 'member': 0.5},
{'name': "Doctors", 'query': None, 'member': 0.4},
{'name': "Teachers", 'query': None, 'member': 0.3},
{'name': "Drivers", 'query': None, 'member': 0.2},
{'name': "Testers", 'query': None, 'member': 0.1},
{'name': "Empty", 'query': None, 'member': 0.0},
{'name': "Youth (Dynamic)", 'query': 'age <= 18', 'member': lambda c: c['age'] and c['age'] <= 18},
{'name': "Unregistered (Dynamic)", 'query': 'joined = ""', 'member': lambda c: not c['joined']},
{'name': "Districts (Dynamic)", 'query': 'district=Faskari or district=Zuru or district=Anka',
'member': lambda c: c['district'] and c['district'].name in ("Faskari", "Zuru", "Anka")},
)
LABELS = ("Reporting", "Testing", "Youth", "Farming", "Health", "Education", "Trade", "Driving", "Building", "Spam")
FLOWS = (
{'name': "Favorites", 'file': "favorites.json", 'templates': (
["blue", "mutzig", "bob"],
["orange", "green", "primus", "jeb"],
)},
{'name': "SMS Form", 'file': "sms_form.json", 'templates': (["22 F Seattle"], ["35 M MIAMI"])},
{'name': "Pick a Number", 'file': "pick_a_number.json", 'templates': (["1"], ["4"], ["5"], ["7"], ["8"])}
)
# contact names are generated from these components
CONTACT_NAMES = (
("", "Anne", "Bob", "Cathy", "Dave", "Evan", "Freda", "George", "Hallie", "Igor"),
("", "Jameson", "Kardashian", "Lopez", "Mooney", "Newman", "O'Shea", "Poots", "Quincy", "Roberts"),
)
CONTACT_LANGS = (None, "eng", "fre", "spa", "kin")
CONTACT_HAS_TEL_PROB = 0.9 # 9/10 contacts have a phone number
CONTACT_HAS_TWITTER_PROB = 0.1 # 1/10 contacts have a twitter handle
CONTACT_IS_STOPPED_PROB = 0.01 # 1/100 contacts are stopped
CONTACT_IS_BLOCKED_PROB = 0.01 # 1/100 contacts are blocked
CONTACT_IS_DELETED_PROB = 0.005 # 1/200 contacts are deleted
CONTACT_HAS_FIELD_PROB = 0.8 # 8/10 fields set for each contact
RUN_RESPONSE_PROB = 0.1 # 1/10 runs will be responded to
INBOX_MESSAGES = (("What is", "I like", "No"), ("beer", "tea", "coffee"), ("thank you", "please", "today"))
class Command(BaseCommand):
COMMAND_GENERATE = 'generate'
COMMAND_SIMULATE = 'simulate'
help = "Generates a database suitable for performance testing"
def add_arguments(self, parser):
cmd = self
subparsers = parser.add_subparsers(dest='command', help='Command to perform',
parser_class=lambda **kw: CommandParser(cmd, **kw))
gen_parser = subparsers.add_parser('generate', help='Generates a clean testing database')
gen_parser.add_argument('--orgs', type=int, action='store', dest='num_orgs', default=100)
gen_parser.add_argument('--contacts', type=int, action='store', dest='num_contacts', default=1000000)
gen_parser.add_argument('--seed', type=int, action='store', dest='seed', default=None)
sim_parser = subparsers.add_parser('simulate', help='Simulates activity on an existing database')
sim_parser.add_argument('--runs', type=int, action='store', dest='num_runs', default=500)
def handle(self, command, *args, **kwargs):
start = time.time()
if command == self.COMMAND_GENERATE:
self.handle_generate(kwargs['num_orgs'], kwargs['num_contacts'], kwargs['seed'])
else:
self.handle_simulate(kwargs['num_runs'])
time_taken = time.time() - start
self._log("Completed in %d secs, peak memory usage: %d MiB\n" % (int(time_taken), int(self.peak_memory())))
def handle_generate(self, num_orgs, num_contacts, seed):
"""
Creates a clean database
"""
seed = self.configure_random(num_orgs, seed)
self._log("Generating random base database (seed=%d)...\n" % seed)
try:
has_data = Org.objects.exists()
except Exception: # pragma: no cover
raise CommandError("Run migrate command first to create database tables")
if has_data:
raise CommandError("Can't generate content in non-empty database.")
self.batch_size = 5000
# the timespan being modelled by this database
self.db_ends_on = timezone.now()
self.db_begins_on = self.db_ends_on - timedelta(days=CONTENT_AGE)
# this is a new database so clear out redis
self._log("Clearing out Redis cache... ")
r = get_redis_connection()
r.flushdb()
self._log(self.style.SUCCESS("OK") + '\n')
superuser = User.objects.create_superuser("root", "[email protected]", USER_PASSWORD)
country, locations = self.load_locations(LOCATIONS_DUMP)
orgs = self.create_orgs(superuser, country, num_orgs)
self.create_users(orgs)
self.create_channels(orgs)
self.create_fields(orgs)
self.create_groups(orgs)
self.create_labels(orgs)
self.create_flows(orgs)
self.create_contacts(orgs, locations, num_contacts)
def handle_simulate(self, num_runs):
"""
Prepares to resume simulating flow activity on an existing database
"""
self._log("Resuming flow activity simulation on existing database...\n")
orgs = list(Org.objects.order_by('id'))
if not orgs:
raise CommandError("Can't simulate activity on an empty database")
self.configure_random(len(orgs))
# in real life Nexmo messages are throttled, but that's not necessary for this simulation
del Channel.CHANNEL_SETTINGS[Channel.TYPE_NEXMO]['max_tps']
inputs_by_flow_name = {f['name']: f['templates'] for f in FLOWS}
self._log("Preparing existing orgs... ")
for org in orgs:
flows = list(org.flows.order_by('id'))
for flow in flows:
flow.input_templates = inputs_by_flow_name[flow.name]
org.cache = {
'users': list(org.get_org_users().order_by('id')),
'channels': list(org.channels.order_by('id')),
'groups': list(ContactGroup.user_groups.filter(org=org).order_by('id')),
'flows': flows,
'contacts': list(org.org_contacts.values_list('id', flat=True)), # only ids to save memory
'activity': None
}
self._log(self.style.SUCCESS("OK") + '\n')
self.simulate_activity(orgs, num_runs)
def configure_random(self, num_orgs, seed=None):
if not seed:
seed = random.randrange(0, 65536)
self.random = random.Random(seed)
# monkey patch uuid4 so it returns the same UUIDs for the same seed, see https://github.com/joke2k/faker/issues/484#issuecomment-287931101
from temba.utils import models
models.uuid4 = lambda: uuid.UUID(int=(self.random.getrandbits(128) | (1 << 63) | (1 << 78)) & (~(1 << 79) & ~(1 << 77) & ~(1 << 76) & ~(1 << 62)))
# We want a variety of large and small orgs so when allocating content like contacts and messages, we apply a
# bias toward the beginning orgs. if there are N orgs, then the amount of content the first org will be
# allocated is (1/N) ^ (1/bias). This sets the bias so that the first org will get ~50% of the content:
self.org_bias = math.log(1.0 / num_orgs, 0.5)
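# Worked example (sketch, not in the original source): with num_orgs == 100,
# org_bias == log(0.01) / log(0.5) ~= 6.64, so the first org's share of the
# content is (1/100) ** (1/6.64) ~= 0.5, i.e. roughly half of everything.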
return seed
def load_locations(self, path):
"""
Loads admin boundary records from the given dump of that table
"""
self._log("Loading locations from %s... " % path)
# load dump into current db with pg_restore
db_config = settings.DATABASES['default']
try:
check_call('export PGPASSWORD=%s && pg_restore -U%s -w -d %s %s' %
(db_config['PASSWORD'], db_config['USER'], db_config['NAME'], path), shell=True)
except CalledProcessError: # pragma: no cover
raise CommandError("Error occurred whilst calling pg_restore to load locations dump")
# fetch as tuples of (WARD, DISTRICT, STATE)
wards = AdminBoundary.objects.filter(level=3).prefetch_related('parent', 'parent__parent')
locations = [(w, w.parent, w.parent.parent) for w in wards]
country = AdminBoundary.objects.filter(level=0).get()
self._log(self.style.SUCCESS("OK") + '\n')
return country, locations
def create_orgs(self, superuser, country, num_total):
"""
Creates and initializes the orgs
"""
self._log("Creating %d orgs... " % num_total)
org_names = ['%s %s' % (o1, o2) for o2 in ORG_NAMES[1] for o1 in ORG_NAMES[0]]
self.random.shuffle(org_names)
orgs = []
for o in range(num_total):
orgs.append(Org(name=org_names[o % len(org_names)], timezone=self.random.choice(pytz.all_timezones),
brand='rapidpro.io', country=country,
created_on=self.db_begins_on, created_by=superuser, modified_by=superuser))
Org.objects.bulk_create(orgs)
orgs = list(Org.objects.order_by('id'))
self._log(self.style.SUCCESS("OK") + "\nInitializing orgs... ")
for o, org in enumerate(orgs):
org.initialize(topup_size=max((1000 - o), 1) * 1000)
# we'll cache some metadata on each org as it's created to save re-fetching things
org.cache = {
'users': [],
'fields': {},
'groups': [],
'system_groups': {g.group_type: g for g in ContactGroup.system_groups.filter(org=org)},
}
self._log(self.style.SUCCESS("OK") + '\n')
return orgs
def create_users(self, orgs):
"""
Creates a user of each type for each org
"""
self._log("Creating %d users... " % (len(orgs) * len(USERS)))
# create users for each org
for org in orgs:
for u in USERS:
user = User.objects.create_user(u['username'] % org.id, u['email'] % org.id, USER_PASSWORD)
getattr(org, u['role']).add(user)
user.set_org(org)
org.cache['users'].append(user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_channels(self, orgs):
"""
Creates the channels for each org
"""
self._log("Creating %d channels... " % (len(orgs) * len(CHANNELS)))
for org in orgs:
user = org.cache['users'][0]
for c in CHANNELS:
Channel.objects.create(org=org, name=c['name'], channel_type=c['channel_type'],
address=c['address'], schemes=[c['scheme']],
created_by=user, modified_by=user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_fields(self, orgs):
"""
Creates the contact fields for each org
"""
self._log("Creating %d fields... " % (len(orgs) * len(FIELDS)))
for org in orgs:
user = org.cache['users'][0]
for f in FIELDS:
field = ContactField.objects.create(org=org, key=f['key'], label=f['label'],
value_type=f['value_type'], show_in_table=True,
created_by=user, modified_by=user)
org.cache['fields'][f['key']] = field
self._log(self.style.SUCCESS("OK") + '\n')
def create_groups(self, orgs):
"""
Creates the contact groups for each org
"""
self._log("Creating %d groups... " % (len(orgs) * len(GROUPS)))
for org in orgs:
user = org.cache['users'][0]
for g in GROUPS:
if g['query']:
group = ContactGroup.create_dynamic(org, user, g['name'], g['query'])
else:
group = ContactGroup.user_groups.create(org=org, name=g['name'], created_by=user, modified_by=user)
group.member = g['member']
group.count = 0
org.cache['groups'].append(group)
self._log(self.style.SUCCESS("OK") + '\n')
def create_labels(self, orgs):
"""
Creates the message labels for each org
"""
self._log("Creating %d labels... " % (len(orgs) * len(LABELS)))
for org in orgs:
user = org.cache['users'][0]
for name in LABELS:
Label.label_objects.create(org=org, name=name, created_by=user, modified_by=user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_flows(self, orgs):
"""
Creates the flows for each org
"""
self._log("Creating %d flows... " % (len(orgs) * len(FLOWS)))
for org in orgs:
user = org.cache['users'][0]
for f in FLOWS:
with open('media/test_flows/' + f['file'], 'r') as flow_file:
org.import_app(json.load(flow_file), user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_contacts(self, orgs, locations, num_contacts):
"""
Creates test and regular contacts for this database. Contacts are created
in batches to avoid holding all contact and URN objects in memory at once.
"""
group_counts = defaultdict(int)
self._log("Creating %d test contacts..." % (len(orgs) * len(USERS)))
for org in orgs:
test_contacts = []
for user in org.cache['users']:
test_contacts.append(Contact.get_test_contact(user))
org.cache['test_contacts'] = test_contacts
self._log(self.style.SUCCESS("OK") + '\n')
self._log("Creating %d regular contacts...\n" % num_contacts)
# disable table triggers to speed up insertion and in the case of contact group m2m, avoid having an unsquashed
# count row for every contact
with DisableTriggersOn(Contact, ContactURN, Value, ContactGroup.contacts.through):
names = [('%s %s' % (c1, c2)).strip() for c2 in CONTACT_NAMES[1] for c1 in CONTACT_NAMES[0]]
names = [n if n else None for n in names]
batch_num = 1
for index_batch in chunk_list(six.moves.xrange(num_contacts), self.batch_size):
batch = []
# generate flat representations and contact objects for this batch
for c_index in index_batch: # pragma: no cover
org = self.random_org(orgs)
name = self.random_choice(names)
location = self.random_choice(locations) if self.probability(CONTACT_HAS_FIELD_PROB) else None
created_on = self.timeline_date(c_index / num_contacts)
c = {
'org': org,
'user': org.cache['users'][0],
'name': name,
'groups': [],
'tel': '+2507%08d' % c_index if self.probability(CONTACT_HAS_TEL_PROB) else None,
'twitter': '%s%d' % (name.replace(' ', '_').lower() if name else 'tweep', c_index) if self.probability(CONTACT_HAS_TWITTER_PROB) else None,
'gender': self.random_choice(('M', 'F')) if self.probability(CONTACT_HAS_FIELD_PROB) else None,
'age': self.random.randint(16, 80) if self.probability(CONTACT_HAS_FIELD_PROB) else None,
'joined': self.random_date() if self.probability(CONTACT_HAS_FIELD_PROB) else None,
'ward': location[0] if location else None,
'district': location[1] if location else None,
'state': location[2] if location else None,
'language': self.random_choice(CONTACT_LANGS),
'is_stopped': self.probability(CONTACT_IS_STOPPED_PROB),
'is_blocked': self.probability(CONTACT_IS_BLOCKED_PROB),
'is_active': self.probability(1 - CONTACT_IS_DELETED_PROB),
'created_on': created_on,
'modified_on': self.random_date(created_on, self.db_ends_on),
}
# work out which system groups this contact belongs to
if c['is_active']:
if not c['is_blocked'] and not c['is_stopped']:
c['groups'].append(org.cache['system_groups'][ContactGroup.TYPE_ALL])
if c['is_blocked']:
c['groups'].append(org.cache['system_groups'][ContactGroup.TYPE_BLOCKED])
if c['is_stopped']:
c['groups'].append(org.cache['system_groups'][ContactGroup.TYPE_STOPPED])
# let each user group decide if it is taking this contact
for g in org.cache['groups']:
if g.member(c) if callable(g.member) else self.probability(g.member):
c['groups'].append(g)
# track changes to group counts
for g in c['groups']:
group_counts[g] += 1
batch.append(c)
self._create_contact_batch(batch)
self._log(" > Created batch %d of %d\n" % (batch_num, max(num_contacts // self.batch_size, 1)))
batch_num += 1
# create group count records manually
counts = []
for group, count in group_counts.items():
counts.append(ContactGroupCount(group=group, count=count, is_squashed=True))
group.count = count
ContactGroupCount.objects.bulk_create(counts)
def _create_contact_batch(self, batch):
"""
Bulk creates a batch of contacts from flat representations
"""
for c in batch:
c['object'] = Contact(org=c['org'], name=c['name'], language=c['language'],
is_stopped=c['is_stopped'], is_blocked=c['is_blocked'],
is_active=c['is_active'],
created_by=c['user'], created_on=c['created_on'],
modified_by=c['user'], modified_on=c['modified_on'])
Contact.objects.bulk_create([c['object'] for c in batch])
# now that contacts have pks, bulk create the actual URN, value and group membership objects
batch_urns = []
batch_values = []
batch_memberships = []
for c in batch:
org = c['org']
c['urns'] = []
if c['tel']:
c['urns'].append(ContactURN(org=org, contact=c['object'], priority=50, scheme=TEL_SCHEME,
path=c['tel'], identity=URN.from_tel(c['tel'])))
if c['twitter']:
c['urns'].append(ContactURN(org=org, contact=c['object'], priority=50, scheme=TWITTER_SCHEME,
path=c['twitter'], identity=URN.from_twitter(c['twitter'])))
if c['gender']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['gender'],
string_value=c['gender']))
if c['age']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['age'],
string_value=str(c['age']), decimal_value=c['age']))
if c['joined']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['joined'],
string_value=datetime_to_str(c['joined']), datetime_value=c['joined']))
if c['ward']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['ward'],
string_value=c['ward'].name, location_value=c['ward']))
if c['district']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['district'],
string_value=c['district'].name, location_value=c['district']))
if c['state']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['state'],
string_value=c['state'].name, location_value=c['state']))
for g in c['groups']:
batch_memberships.append(ContactGroup.contacts.through(contact=c['object'], contactgroup=g))
batch_urns += c['urns']
ContactURN.objects.bulk_create(batch_urns)
Value.objects.bulk_create(batch_values)
ContactGroup.contacts.through.objects.bulk_create(batch_memberships)
def simulate_activity(self, orgs, num_runs):
self._log("Starting simulation. Ctrl+C to cancel...\n")
runs = 0
while runs < num_runs:
try:
with transaction.atomic():
# make sure every org has an active flow
for org in orgs:
if not org.cache['activity']:
self.start_flow_activity(org)
with transaction.atomic():
org = self.random_org(orgs)
if self.probability(0.1):
self.create_unsolicited_incoming(org)
else:
self.create_flow_run(org)
runs += 1
except KeyboardInterrupt:
self._log("Shutting down...\n")
break
squash_channelcounts()
squash_flowpathcounts()
squash_flowruncounts()
prune_recentmessages()
squash_topupcredits()
squash_labelcounts()
def start_flow_activity(self, org):
assert not org.cache['activity']
user = org.cache['users'][0]
flow = self.random_choice(org.cache['flows'])
if self.probability(0.9):
# start a random group using a flow start
group = self.random_choice(org.cache['groups'])
contacts_started = list(group.contacts.values_list('id', flat=True))
self._log(" > Starting flow %s for group %s (%d) in org %s\n"
% (flow.name, group.name, len(contacts_started), org.name))
start = FlowStart.create(flow, user, groups=[group], restart_participants=True)
start.start()
else:
# start a random individual without a flow start
if not org.cache['contacts']:
return
contact = Contact.objects.get(id=self.random_choice(org.cache['contacts']))
contacts_started = [contact.id]
self._log(" > Starting flow %s for contact #%d in org %s\n" % (flow.name, contact.id, org.name))
flow.start([], [contact], restart_participants=True)
org.cache['activity'] = {'flow': flow, 'unresponded': contacts_started, 'started': list(contacts_started)}
def end_flow_activity(self, org):
self._log(" > Ending flow %s for in org %s\n" % (org.cache['activity']['flow'].name, org.name))
org.cache['activity'] = None
runs = FlowRun.objects.filter(org=org, is_active=True)
FlowRun.bulk_exit(runs, FlowRun.EXIT_TYPE_EXPIRED)
def create_flow_run(self, org):
activity = org.cache['activity']
flow = activity['flow']
if activity['unresponded']:
contact_id = self.random_choice(activity['unresponded'])
activity['unresponded'].remove(contact_id)
contact = Contact.objects.get(id=contact_id)
urn = contact.urns.first()
if urn:
self._log(" > Receiving flow responses for flow %s in org %s\n" % (flow.name, flow.org.name))
inputs = self.random_choice(flow.input_templates)
for text in inputs:
channel = flow.org.cache['channels'][0]
Msg.create_incoming(channel, six.text_type(urn), text)
# if more than 10% of contacts have responded, consider flow activity over
if len(activity['unresponded']) <= (len(activity['started']) * 0.9):
self.end_flow_activity(flow.org)
def create_unsolicited_incoming(self, org):
if not org.cache['contacts']:
return
self._log(" > Receiving unsolicited incoming message in org %s\n" % org.name)
available_contacts = list(set(org.cache['contacts']) - set(org.cache['activity']['started']))
if available_contacts:
contact = Contact.objects.get(id=self.random_choice(available_contacts))
channel = self.random_choice(org.cache['channels'])
urn = contact.urns.first()
if urn:
text = ' '.join([self.random_choice(l) for l in INBOX_MESSAGES])
Msg.create_incoming(channel, six.text_type(urn), text)
def probability(self, prob):
return self.random.random() < prob
def random_choice(self, seq, bias=1.0):
if not seq:
raise ValueError("Can't select random item from empty sequence")
return seq[int(math.pow(self.random.random(), bias) * len(seq))]
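# Hedged note (not in the original source): with bias b > 1, pow(random(), b)
# skews toward 0 and so favours early items; P(index < k) = (k/len(seq)) ** (1/b).
# With the default org_bias for 100 orgs (~6.64), index 0 is picked ~50% of the
# time, matching the content-allocation comment in configure_random().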
def weighted_choice(self, seq, weights):
r = self.random.random() * sum(weights)
cum_weight = 0.0
for i, item in enumerate(seq):
cum_weight += weights[i]
if r < cum_weight or (i == len(seq) - 1):
return item
def random_org(self, orgs):
"""
Returns a random org with bias toward the orgs with the lowest indexes
"""
return self.random_choice(orgs, bias=self.org_bias)
def random_date(self, start=None, end=None):
if not end:
end = timezone.now()
if not start:
start = end - timedelta(days=365)
if start == end:
return end
return ms_to_datetime(self.random.randrange(datetime_to_ms(start), datetime_to_ms(end)))
def timeline_date(self, dist):
"""
Converts a 0..1 distance into a date on this database's overall timeline
"""
seconds_span = (self.db_ends_on - self.db_begins_on).total_seconds()
return self.db_begins_on + timedelta(seconds=(seconds_span * dist))
@staticmethod
def peak_memory():
rusage_denom = 1024
if sys.platform == 'darwin':
# OSX gives value in bytes, other OSes in kilobytes
rusage_denom *= rusage_denom
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
def _log(self, text):
self.stdout.write(text, ending='')
self.stdout.flush()
class DisableTriggersOn(object):
"""
Helper context manager for temporarily disabling database triggers for a given model
"""
def __init__(self, *models):
self.tables = [m._meta.db_table for m in models]
def __enter__(self):
with connection.cursor() as cursor:
for table in self.tables:
cursor.execute('ALTER TABLE %s DISABLE TRIGGER ALL;' % table)
def __exit__(self, exc_type, exc_val, exc_tb):
with connection.cursor() as cursor:
for table in self.tables:
cursor.execute('ALTER TABLE %s ENABLE TRIGGER ALL;' % table)
|
agpl-3.0
|
michaelhowden/eden
|
modules/geopy/geocoders/openmapquest.py
|
32
|
2503
|
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from urllib import urlencode
from urllib2 import urlopen
from geopy.geocoders.base import Geocoder
from geopy.util import logger, decode_page, join_filter
class OpenMapQuest(Geocoder):
"""Geocoder using the MapQuest Open Platform Web Services."""
def __init__(self, api_key='', format_string='%s'):
"""Initialize an Open MapQuest geocoder with location-specific
address information, no API Key is needed by the Nominatim based
platform.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
"""
self.api_key = api_key
self.format_string = format_string
self.url = "http://open.mapquestapi.com/nominatim/v1/search?format=json&%s"
def geocode(self, string, exactly_one=True):
if isinstance(string, unicode):
string = string.encode('utf-8')
params = {'q': self.format_string % string}
url = self.url % urlencode(params)
logger.debug("Fetching %s..." % url)
page = urlopen(url)
return self.parse_json(page, exactly_one)
def parse_json(self, page, exactly_one=True):
"""Parse display name, latitude, and longitude from an JSON response."""
if not isinstance(page, basestring):
page = decode_page(page)
resources = json.loads(page)
if exactly_one and len(resources) != 1:
    from warnings import warn
    warn("Didn't find exactly one resource! (Found %d.) "
         "Use exactly_one=False.\n" % len(resources))
def parse_resource(resource):
location = resource['display_name']
latitude = resource['lat'] or None
longitude = resource['lon'] or None
if latitude and longitude:
latitude = float(latitude)
longitude = float(longitude)
return (location, (latitude, longitude))
if exactly_one:
return parse_resource(resources[0])
else:
return [parse_resource(resource) for resource in resources]
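# Hedged usage sketch (not part of the original module; the address values
# are hypothetical and a live network connection is required):
#
#     geocoder = OpenMapQuest(format_string='%s, Mountain View, CA')
#     place, (lat, lng) = geocoder.geocode('1600 Amphitheatre Pkwy')
#     results = geocoder.geocode('Main Street', exactly_one=False)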
|
mit
|
loretoparisi/nupic
|
tests/integration/nupic/opf/opf_experiment_results_test.py
|
12
|
15515
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file tests specific experiments to see if they are providing the
correct results. These are high level tests of the algorithms themselves.
"""
import os
import shutil
from subprocess import call
import time
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
class OPFExperimentResultsTest(unittest.TestCase):
def testExperimentResults(self):
"""Run specific experiments and verify that they are producing the correct
results.
opfDir is the examples/opf directory in the install path
and is used to find run_opf_experiment.py
The testdir is the directory that contains the experiments we will be
running. When running in the auto-build setup, this will be a temporary
directory that has had this script, as well as the specific experiments
we will be running, copied into it by the qa/autotest/prediction_results.py
script.
When running stand-alone from the command line, this will point to the
examples/prediction directory in the install tree (same as predictionDir)
"""
opfDir = resource_filename("nupic", os.path.join("..", "examples", "opf"))
testDir = opfDir
if not os.path.exists(os.path.join(testDir, "experiments/classification")):
testDir = opfDir
# Generate any dynamically generated datasets now
command = ['python', os.path.join(testDir, 'experiments', 'classification',
'makeDatasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
# Generate any dynamically generated datasets now
command = ['python', os.path.join(testDir, 'experiments', 'multistep',
'make_datasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
# Generate any dynamically generated datasets now
command = ['python', os.path.join(testDir, 'experiments',
'spatial_classification', 'make_datasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
# Run from the test directory so that we can find our experiments
os.chdir(testDir)
runExperiment = resource_filename("nupic", os.path.join("..",
"scripts", "run_opf_experiment.py"))
# A list of experiments to run. Valid attributes:
# experimentDir - Required, path to the experiment directory containing
# description.py
# args - optional. List of arguments for run_opf_experiment
# results - A dictionary of expected results. The keys are tuples
# containing (predictionLogFileName, columnName). The
# value is a (min, max) expected value from the last row
# in the prediction log.
multistepTests = [
# For this one, in theory the error for 1 step should be < 0.20
{ 'experimentDir': 'experiments/multistep/simple_0',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.20),
}
},
# For this one, in theory the error for 1 step should be < 0.50, but we
# get slightly higher because our sample size is smaller than ideal
{ 'experimentDir': 'experiments/multistep/simple_0_f2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
(0.0, 0.525),
}
},
# For this one, in theory the error for 1 step should be < 0.20
{ 'experimentDir': 'experiments/multistep/simple_1',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.20),
}
},
# For this test, we haven't figured out the theoretical error, this
# error is determined empirically from actual results
{ 'experimentDir': 'experiments/multistep/simple_1_f2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
(0.0, 3.65),
}
},
# For this one, in theory the error for 1 step should be < 0.20
{ 'experimentDir': 'experiments/multistep/simple_2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.20),
}
},
# For this one, in theory the error for 1 step should be < 0.10 and for
# 3 step < 0.30, but our actual results are better.
{ 'experimentDir': 'experiments/multistep/simple_3',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.06),
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=3:window=200:field=field1"):
(0.0, 0.20),
}
},
# For this test, we haven't figured out the theoretical error, this
# error is determined empirically from actual results
{ 'experimentDir': 'experiments/multistep/simple_3_f2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
(0.0, 0.6),
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2"):
(0.0, 1.8),
}
},
# Test missing record support.
# Should have 0 error by the end of the dataset
{ 'experimentDir': 'experiments/missing_record/simple_0',
'results': {
('DefaultTask.NontemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=25:field=field1"):
(1.0, 1.0),
}
},
] # end of multistepTests
classificationTests = [
# ----------------------------------------------------------------------
# Classification Experiments
{ 'experimentDir': 'experiments/classification/category_hub_TP_0',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.005),
}
},
{ 'experimentDir': 'experiments/classification/category_TP_0',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.045),
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.99),
}
},
{ 'experimentDir': 'experiments/classification/category_TP_1',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.005),
}
},
{ 'experimentDir': 'experiments/classification/scalar_TP_0',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.155),
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.900),
}
},
{ 'experimentDir': 'experiments/classification/scalar_TP_1',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.03),
}
},
] # End of classification tests
spatialClassificationTests = [
{ 'experimentDir': 'experiments/spatial_classification/category_0',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"):
(0.0, 0.05),
}
},
{ 'experimentDir': 'experiments/spatial_classification/category_1',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"):
(0.0, 0.0),
}
},
{ 'experimentDir': 'experiments/spatial_classification/scalar_0',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"):
(0.0, 0.025),
}
},
{ 'experimentDir': 'experiments/spatial_classification/scalar_1',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"):
(0.0, 0.01),
}
},
]
anomalyTests = [
# ----------------------------------------------------------------------
# Classification Experiments
{ 'experimentDir': 'experiments/anomaly/temporal/simple',
'results': {
('DefaultTask.TemporalAnomaly.predictionLog.csv',
'anomalyScore:passThruPrediction:window=1000:field=f'): (0.13,
0.14),
}
},
] # End of anomaly tests
tests = []
tests += multistepTests
tests += classificationTests
tests += spatialClassificationTests
tests += anomalyTests
# Uncomment this to only run a specific experiment(s)
#tests = tests[7:8]
# This contains a list of tuples: (expDir, key, results)
summaryOfResults = []
startTime = time.time()
testIdx = -1
for test in tests:
testIdx += 1
expDirectory = test['experimentDir']
# -------------------------------------------------------------------
# Remove files/directories generated by previous tests:
toDelete = []
# Remove inference results
path = os.path.join(expDirectory, "inference")
toDelete.append(path)
path = os.path.join(expDirectory, "savedmodels")
toDelete.append(path)
for path in toDelete:
if not os.path.exists(path):
continue
print "Removing %s ..." % path
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
# ------------------------------------------------------------------------
# Run the test.
args = test.get('args', [])
print "Running experiment %s ..." % (expDirectory)
command = ['python', runExperiment, expDirectory] + args
retVal = call(command)
# If retVal is non-zero and this was not a negative test, or if retVal is
# zero and this is a negative test, something went wrong.
if retVal:
print "Details of failed test: %s" % test
print("TestIdx %d, OPF experiment '%s' failed with return code %i." %
(testIdx, expDirectory, retVal))
self.assertFalse(retVal)
# -----------------------------------------------------------------------
# Check the results
for (key, expValues) in test['results'].items():
(logFilename, colName) = key
# Open the prediction log file
logFile = FileRecordStream(os.path.join(expDirectory, 'inference',
logFilename))
colNames = [x[0] for x in logFile.getFields()]
if colName not in colNames:
print "TestIdx %d: %s not one of the columns in " \
"prediction log file. Available column names are: %s" % (testIdx,
colName, colNames)
self.assertTrue(colName in colNames)
colIndex = colNames.index(colName)
# Read till we get to the last line
while True:
try:
row = logFile.next()
except StopIteration:
break
result = row[colIndex]
# Save summary of results
summaryOfResults.append((expDirectory, colName, result))
print "Actual result for %s, %s:" % (expDirectory, colName), result
print "Expected range:", expValues
failed = (expValues[0] is not None and result < expValues[0]) \
or (expValues[1] is not None and result > expValues[1])
if failed:
print ("TestIdx %d: Experiment %s failed. \nThe actual result"
" for %s (%s) was outside the allowed range of %s" % (testIdx,
expDirectory, colName, result, expValues))
else:
print " Within expected range."
self.assertFalse(failed)
# =======================================================================
# Print summary of results:
print
print "Summary of results in all experiments run:"
print "========================================="
prevExpDir = None
for (expDir, key, results) in summaryOfResults:
if expDir != prevExpDir:
print
print expDir
prevExpDir = expDir
print " %s: %s" % (key, results)
print "\nElapsed time: %.1f seconds" % (time.time() - startTime)
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
|
aliyun/aliyun-oss-python-sdk
|
tests/test_server_side_encryotion.py
|
1
|
19249
|
# -*- coding: utf-8 -*-
from .common import *
from oss2.models import ServerSideEncryptionRule, PartInfo
from oss2 import (SERVER_SIDE_ENCRYPTION_KMS, SERVER_SIDE_ENCRYPTION_AES256,
SERVER_SIDE_ENCRYPTION_SM4, KMS_DATA_ENCRYPTION_SM4)
from oss2.headers import (OSS_SERVER_SIDE_ENCRYPTION, OSS_SERVER_SIDE_ENCRYPTION_KEY_ID,
OSS_SERVER_SIDE_DATA_ENCRYPTION)
from oss2 import determine_part_size, SizedFileAdapter
class TestSSEDataEncryption(OssTestCase):
def setUp(self):
OssTestCase.setUp(self)
self.endpoint = OSS_ENDPOINT
def test_put_bucket_encryption(self):
auth = oss2.Auth(OSS_ID, OSS_SECRET)
bucket_name = OSS_BUCKET + "-test-put-bucket-encryption"
bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
bucket.create_bucket()
# set SM4
rule = ServerSideEncryptionRule()
rule.sse_algorithm = oss2.SERVER_SIDE_ENCRYPTION_SM4
bucket.put_bucket_encryption(rule)
result = bucket.get_bucket_encryption()
self.assertEqual(SERVER_SIDE_ENCRYPTION_SM4, result.sse_algorithm)
self.assertIsNone(result.kms_master_keyid)
self.assertIsNone(result.kms_data_encryption)
bucket_info = bucket.get_bucket_info()
rule = bucket_info.bucket_encryption_rule
self.assertEqual(SERVER_SIDE_ENCRYPTION_SM4, rule.sse_algorithm)
self.assertIsNone(rule.kms_master_keyid)
self.assertIsNone(rule.kms_data_encryption)
# set KMS with SM4 data encryption and no kms key id.
rule = ServerSideEncryptionRule()
rule.sse_algorithm = SERVER_SIDE_ENCRYPTION_KMS
rule.kms_data_encryption = KMS_DATA_ENCRYPTION_SM4
bucket.put_bucket_encryption(rule)
result = bucket.get_bucket_encryption()
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, result.sse_algorithm)
self.assertIsNone(result.kms_master_keyid)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, result.kms_data_encryption)
bucket_info = bucket.get_bucket_info()
rule = bucket_info.bucket_encryption_rule
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, rule.sse_algorithm)
self.assertIsNone(rule.kms_master_keyid)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, rule.kms_data_encryption)
# set KMS with SM4 data encryption and a kms key id
rule = ServerSideEncryptionRule()
rule.sse_algorithm = SERVER_SIDE_ENCRYPTION_KMS
rule.kms_master_keyid = '123'
rule.kms_data_encryption = KMS_DATA_ENCRYPTION_SM4
bucket.put_bucket_encryption(rule)
result = bucket.get_bucket_encryption()
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, result.sse_algorithm)
self.assertEqual('123', result.kms_master_keyid)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, result.kms_data_encryption)
bucket_info = bucket.get_bucket_info()
rule = bucket_info.bucket_encryption_rule
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, rule.sse_algorithm)
self.assertEqual('123', rule.kms_master_keyid)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, rule.kms_data_encryption)
# set AES256 with a data encryption value (expected to be ignored)
rule = ServerSideEncryptionRule()
rule.sse_algorithm = SERVER_SIDE_ENCRYPTION_AES256
rule.kms_data_encryption = KMS_DATA_ENCRYPTION_SM4
bucket.put_bucket_encryption(rule)
result = bucket.get_bucket_encryption()
self.assertEqual(SERVER_SIDE_ENCRYPTION_AES256, result.sse_algorithm)
self.assertIsNone(result.kms_master_keyid)
self.assertIsNone(result.kms_data_encryption)
bucket_info = bucket.get_bucket_info()
rule = bucket_info.bucket_encryption_rule
self.assertEqual(SERVER_SIDE_ENCRYPTION_AES256, rule.sse_algorithm)
self.assertIsNone(rule.kms_master_keyid)
self.assertIsNone(rule.kms_data_encryption)
# set SM4 with a data encryption value (expected to be ignored)
rule = ServerSideEncryptionRule()
rule.sse_algorithm = SERVER_SIDE_ENCRYPTION_SM4
rule.kms_data_encryption = KMS_DATA_ENCRYPTION_SM4
bucket.put_bucket_encryption(rule)
result = bucket.get_bucket_encryption()
self.assertEqual(SERVER_SIDE_ENCRYPTION_SM4, result.sse_algorithm)
self.assertIsNone(result.kms_master_keyid)
self.assertIsNone(result.kms_data_encryption)
bucket_info = bucket.get_bucket_info()
rule = bucket_info.bucket_encryption_rule
self.assertEqual(SERVER_SIDE_ENCRYPTION_SM4, rule.sse_algorithm)
self.assertIsNone(rule.kms_master_keyid)
self.assertIsNone(rule.kms_data_encryption)
def inner_put_object_with_encryption(self, bucket, object_name, data, sse_algorithm, data_algorithm):
headers = dict()
if sse_algorithm:
headers[OSS_SERVER_SIDE_ENCRYPTION] = sse_algorithm
if data_algorithm:
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = data_algorithm
result = bucket.put_object(object_name, data, headers=headers)
ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
return ret_sse_algo, ret_data_algo, ret_kms_key_id
def test_put_object_with_encryption(self):
auth = oss2.Auth(OSS_ID, OSS_SECRET)
bucket_name = OSS_BUCKET + "-test-put-object-data-encryption"
bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
bucket.create_bucket()
object_name = 'test-put-object-none-encryption'
data = b'a'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, None, None)
self.assertIsNone(sse_algo)
self.assertIsNone(data_algo)
self.assertIsNone(key_id)
object_name = 'test-put-object-kms'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, SERVER_SIDE_ENCRYPTION_KMS, None)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNotNone(key_id)
object_name = 'test-put-object-aes'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, SERVER_SIDE_ENCRYPTION_AES256, None)
self.assertEqual(SERVER_SIDE_ENCRYPTION_AES256, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNone(key_id)
object_name = 'test-put-object-sm4'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, SERVER_SIDE_ENCRYPTION_SM4, None)
self.assertEqual(SERVER_SIDE_ENCRYPTION_SM4, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNone(key_id)
object_name = 'test-put-object-kms-sm4'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, SERVER_SIDE_ENCRYPTION_KMS, KMS_DATA_ENCRYPTION_SM4)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, sse_algo)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, data_algo)
self.assertIsNotNone(key_id)
object_name = 'test-put-object-aes256-sm4'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, SERVER_SIDE_ENCRYPTION_AES256, KMS_DATA_ENCRYPTION_SM4)
self.assertEqual(SERVER_SIDE_ENCRYPTION_AES256, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNone(key_id)
object_name = 'test-put-object-sm4-sm4'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, SERVER_SIDE_ENCRYPTION_SM4, KMS_DATA_ENCRYPTION_SM4)
self.assertEqual(SERVER_SIDE_ENCRYPTION_SM4, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNone(key_id)
object_name = 'test-put-object-none-sm4'
sse_algo, data_algo, key_id = self.inner_put_object_with_encryption(bucket, object_name, data, None, KMS_DATA_ENCRYPTION_SM4)
self.assertIsNone(sse_algo)
self.assertIsNone(data_algo)
self.assertIsNone(key_id)
def inner_append_object_with_encryption(self, bucket, object_name, position, data, sse_algorithm, data_algorithm):
headers = dict()
if sse_algorithm:
headers[OSS_SERVER_SIDE_ENCRYPTION] = sse_algorithm
if data_algorithm:
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = data_algorithm
result = bucket.append_object(object_name, position, data, headers=headers)
ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
return ret_sse_algo, ret_data_algo, ret_kms_key_id
def test_append_object_with_encryption(self):
auth = oss2.Auth(OSS_ID, OSS_SECRET)
bucket_name = OSS_BUCKET + "-test-append-object-data-encryption"
bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
bucket.create_bucket()
# first append
object_name = 'test-append'
sse_algo, data_algo, key_id = self.inner_append_object_with_encryption(bucket, object_name, 0, '123', SERVER_SIDE_ENCRYPTION_KMS, None)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNotNone(key_id)
kms_key_id = key_id
# second append with none algorithm
sse_algo, data_algo, key_id = self.inner_append_object_with_encryption(bucket, object_name, 3, '456', None, None)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNotNone(key_id)
self.assertEqual(kms_key_id, key_id)
# third append with other algorithm
sse_algo, data_algo, key_id = self.inner_append_object_with_encryption(bucket, object_name, 6, '789', SERVER_SIDE_ENCRYPTION_AES256, None)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNotNone(key_id)
self.assertEqual(kms_key_id, key_id)
def test_multipart_upload(self):
auth = oss2.Auth(OSS_ID, OSS_SECRET)
bucket_name = OSS_BUCKET + "-test-multipart-upload-data-encryption"
bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
bucket.create_bucket()
key = 'data-encryption-test-upload-part-object'
filename = self._prepare_temp_file_with_size(1024 * 1024)
headers = dict()
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
total_size = os.path.getsize(filename)
# Set part size
part_size = determine_part_size(total_size, preferred_size=(100*1024))
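# Hedged arithmetic note (not in the original test): with a 1 MiB file and a
# preferred size of 100 KiB, part_size is expected to come back as 100 KiB,
# so the upload loop below sends ten full parts plus a ~24 KiB remainder.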
# Init multipart with encryption headers.
result = bucket.init_multipart_upload(key, headers=headers)
ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
self.assertIsNotNone(ret_kms_key_id)
kms_key_id = ret_kms_key_id
upload_id = result.upload_id
parts = []
# Uploading a part with the encryption headers will fail.
headers = dict()
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
with open(filename, 'rb') as fileobj:
part_number = 1
num_to_upload = part_size
self.assertRaises(oss2.exceptions.InvalidArgument, bucket.upload_part, key, upload_id, part_number,
SizedFileAdapter(fileobj, num_to_upload), headers=headers)
# Upload part with none encryption headers.
with open(filename, 'rb') as fileobj:
part_number = 1
offset = 0
while offset < total_size:
num_to_upload = min(part_size, total_size - offset)
result = bucket.upload_part(key, upload_id, part_number,
SizedFileAdapter(fileobj, num_to_upload))
parts.append(PartInfo(part_number, result.etag))
offset += num_to_upload
part_number += 1
ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
self.assertEqual(kms_key_id, ret_kms_key_id)
# Complete multipart upload with encryption headers.
self.assertRaises(oss2.exceptions.InvalidArgument, bucket.complete_multipart_upload, key, upload_id, parts, headers=headers)
# Complete multipart upload with none encryption headers.
result = bucket.complete_multipart_upload(key, upload_id, parts)
self.assertEqual(result.status, 200)
ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
self.assertEqual(kms_key_id, ret_kms_key_id)
bucket.delete_object(key)
def test_resumable_upload(self):
auth = oss2.Auth(OSS_ID, OSS_SECRET)
bucket_name = OSS_BUCKET + "-test-resumable-upload-data-encryption"
bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
bucket.create_bucket()
small_object = 'requestpayment-test-resumable-upload-small-object'
big_object = 'requestpayment-test-resumable-upload-big-object'
# Create tmp file smaller than multipart_threshold
file_name = self._prepare_temp_file_with_size(150 * 1024)
# Resumable upload of the small object
headers = dict()
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
result = oss2.resumable_upload(bucket, small_object, file_name,
multipart_threshold=(200 * 1024), num_threads=2, part_size=(100 * 1024),
headers=headers)
ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
self.assertIsNotNone(ret_kms_key_id)
self.assertEqual(result.status, 200)
bucket.delete_object(small_object)
# Start big file test
# Create big file bigger than multipart_threshold
file_name = self._prepare_temp_file_with_size(11 * 1024 * 1024)
headers = dict()
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
result = oss2.resumable_upload(bucket, big_object, file_name,
multipart_threshold=(200 * 1024), num_threads=2, part_size=(100 * 1024),
headers=headers)
        self.assertEqual(result.status, 200)
        ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
        ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
        ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
        self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
        self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
        self.assertIsNotNone(ret_kms_key_id)
        bucket.delete_object(big_object)
def test_copy_object(self):
auth = oss2.Auth(OSS_ID, OSS_SECRET)
bucket_name = OSS_BUCKET + "-test-copy-object-data-encryption"
bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
bucket.create_bucket()
object_name = 'test-copy-object-src'
data = b'a' * 1024
user_header_key = 'x-oss-meta-user1'
user_header_value = 'user_value'
headers = dict()
headers[user_header_key] = user_header_value
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
result = bucket.put_object(object_name, data, headers=headers)
sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, sse_algo)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, data_algo)
self.assertIsNotNone(kms_key_id)
result = bucket.head_object(object_name)
sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
ret_value = result.headers.get(user_header_key)
self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, sse_algo)
self.assertEqual(KMS_DATA_ENCRYPTION_SM4, data_algo)
self.assertIsNotNone(kms_key_id)
self.assertEqual(user_header_value, ret_value)
# test normal copy objects
dest_name = 'test-copy-object-dest'
bucket.copy_object(bucket_name, object_name, dest_name)
result = bucket.head_object(dest_name)
ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
self.assertIsNone(ret_sse_algo)
self.assertIsNone(ret_data_algo)
self.assertIsNone(ret_kms_key_id)
# test copy object with specified encryption headers
headers = dict()
headers[user_header_key] = user_header_value
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_AES256
bucket.copy_object(bucket_name, object_name, dest_name, headers=headers)
result = bucket.head_object(dest_name)
sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
self.assertEqual(SERVER_SIDE_ENCRYPTION_AES256, sse_algo)
self.assertIsNone(data_algo)
self.assertIsNone(kms_key_id)
if __name__ == '__main__':
unittest.main()
|
mit
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/numpy/core/generate_numpy_api.py
|
32
|
7417
|
from __future__ import division, print_function
import os
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
# use annotated api when running under cpychecker
h_template = r"""
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
PyObject *c_api = NULL;
if (numpy == NULL) {
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
#if PY_VERSION_HEX >= 0x03000000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
#else
if (!PyCObject_Check(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
#endif
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version 0x%%x but this version of numpy is 0x%%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version 0x%%x but this version of numpy is 0x%%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#if PY_VERSION_HEX >= 0x03000000
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
#else
#define NUMPY_IMPORT_ARRAY_RETVAL
#endif
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
Numpy C-API
===========
"""
def generate_api(output_dir, force=False):
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
global_vars = sources[0]
scalar_bool_values = sources[1]
types_api = sources[2]
multiarray_funcs = sources[3]
multiarray_api = sources[:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name][0]
annotations = multiarray_funcs[name][1:]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
f.return_type,
f.args, api_name)
for name, val in global_vars.items():
index, type = val
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, val in scalar_bool_values.items():
index = val[0]
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, val in types_api.items():
index = val[0]
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
if len(multiarray_api_dict) != len(multiarray_api_index):
raise AssertionError("Multiarray API size mismatch %d %d" %
(len(multiarray_api_dict), len(multiarray_api_index)))
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
fid = open(header_file, 'w')
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
fid.write(s)
fid.close()
# Write to c-code
fid = open(c_file, 'w')
s = c_template % ',\n'.join(init_list)
fid.write(s)
fid.close()
# write to documentation
fid = open(doc_file, 'w')
fid.write(c_api_header)
for func in numpyapi_list:
fid.write(func.to_ReST())
fid.write('\n\n')
fid.close()
return targets
|
mit
|
drincruz/luigi
|
examples/top_artists.py
|
66
|
9167
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from collections import defaultdict
from heapq import nlargest
from luigi import six
import luigi
import luigi.contrib.hadoop
import luigi.contrib.hdfs
import luigi.postgres
class ExternalStreams(luigi.ExternalTask):
"""
Example of a possible external data dump
To depend on external targets (typically at the top of your dependency graph), you can define
an ExternalTask like this.
"""
date = luigi.DateParameter()
def output(self):
"""
Returns the target output for this task.
In this case, it expects a file to be present in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget(self.date.strftime('data/streams_%Y-%m-%d.tsv'))
class Streams(luigi.Task):
"""
Faked version right now, just generates bogus data.
"""
date = luigi.DateParameter()
def run(self):
"""
Generates bogus data and writes it into the :py:meth:`~.Streams.output` target.
"""
with self.output().open('w') as output:
for _ in range(1000):
output.write('{} {} {}\n'.format(
random.randint(0, 999),
random.randint(0, 999),
random.randint(0, 999)))
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in the local file system.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget(self.date.strftime('data/streams_%Y_%m_%d_faked.tsv'))
class StreamsHdfs(Streams):
"""
This task performs the same work as :py:class:`~.Streams` but its output is written to HDFS.
This class uses :py:meth:`~.Streams.run` and
    overrides :py:meth:`~.Streams.output` to redefine HDFS as its target.
"""
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget(self.date.strftime('data/streams_%Y_%m_%d_faked.tsv'))
class AggregateArtists(luigi.Task):
"""
This task runs over the target data returned by :py:meth:`~/.Streams.output` and
writes the result into its :py:meth:`~.AggregateArtists.output` target (local file).
"""
date_interval = luigi.DateIntervalParameter()
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget("data/artist_streams_{}.tsv".format(self.date_interval))
def requires(self):
"""
This task's dependencies:
* :py:class:`~.Streams`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return [Streams(date) for date in self.date_interval]
def run(self):
artist_count = defaultdict(int)
for t in self.input():
with t.open('r') as in_file:
for line in in_file:
_, artist, track = line.strip().split()
artist_count[artist] += 1
with self.output().open('w') as out_file:
for artist, count in six.iteritems(artist_count):
out_file.write('{}\t{}\n'.format(artist, count))
class AggregateArtistsHadoop(luigi.contrib.hadoop.JobTask):
"""
This task runs a :py:class:`luigi.contrib.hadoop.JobTask` task
over each target data returned by :py:meth:`~/.StreamsHdfs.output` and
writes the result into its :py:meth:`~.AggregateArtistsHadoop.output` target (a file in HDFS).
    This class uses :py:meth:`luigi.contrib.hadoop.JobTask.run`.
"""
date_interval = luigi.DateIntervalParameter()
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget(
"data/artist_streams_%s.tsv" % self.date_interval,
format=luigi.contrib.hdfs.PlainDir
)
def requires(self):
"""
This task's dependencies:
* :py:class:`~.StreamsHdfs`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return [StreamsHdfs(date) for date in self.date_interval]
def mapper(self, line):
"""
The implementation of the map phase of the Hadoop job.
:param line: the input.
        :return: tuple (key, value), in this case (artist, 1) for each stream.
"""
_, artist, _ = line.strip().split()
yield artist, 1
def reducer(self, key, values):
"""
The implementation of the reducer phase of the Hadoop job.
:param key: the artist.
:param values: the stream count.
:return: tuple (artist, count of streams)
"""
yield key, sum(values)
class Top10Artists(luigi.Task):
"""
This task runs over the target data returned by :py:meth:`~/.AggregateArtists.output` or
:py:meth:`~/.AggregateArtistsHadoop.output` in case :py:attr:`~/.Top10Artists.use_hadoop` is set and
writes the result into its :py:meth:`~.Top10Artists.output` target (a file in local filesystem).
"""
date_interval = luigi.DateIntervalParameter()
use_hadoop = luigi.BoolParameter()
def requires(self):
"""
This task's dependencies:
* :py:class:`~.AggregateArtists` or
* :py:class:`~.AggregateArtistsHadoop` if :py:attr:`~/.Top10Artists.use_hadoop` is set.
:return: object (:py:class:`luigi.task.Task`)
"""
if self.use_hadoop:
return AggregateArtistsHadoop(self.date_interval)
else:
return AggregateArtists(self.date_interval)
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget("data/top_artists_%s.tsv" % self.date_interval)
def run(self):
top_10 = nlargest(10, self._input_iterator())
with self.output().open('w') as out_file:
for streams, artist in top_10:
out_line = '\t'.join([
str(self.date_interval.date_a),
str(self.date_interval.date_b),
artist,
str(streams)
])
out_file.write((out_line + '\n'))
def _input_iterator(self):
with self.input().open('r') as in_file:
for line in in_file:
artist, streams = line.strip().split()
yield int(streams), artist
class ArtistToplistToDatabase(luigi.postgres.CopyToTable):
"""
This task runs a :py:class:`luigi.postgres.CopyToTable` task
over the target data returned by :py:meth:`~/.Top10Artists.output` and
writes the result into its :py:meth:`~.ArtistToplistToDatabase.output` target which,
by default, is :py:class:`luigi.postgres.PostgresTarget` (a table in PostgreSQL).
This class uses :py:meth:`luigi.postgres.CopyToTable.run` and :py:meth:`luigi.postgres.CopyToTable.output`.
"""
date_interval = luigi.DateIntervalParameter()
use_hadoop = luigi.BoolParameter()
host = "localhost"
database = "toplists"
user = "luigi"
password = "abc123" # ;)
table = "top10"
columns = [("date_from", "DATE"),
("date_to", "DATE"),
("artist", "TEXT"),
("streams", "INT")]
def requires(self):
"""
This task's dependencies:
* :py:class:`~.Top10Artists`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return Top10Artists(self.date_interval, self.use_hadoop)
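# Illustrative invocation (a sketch; the flags follow luigi's CLI conventions
# and the date interval is an example, not part of this file):
#   python top_artists.py Top10Artists --local-scheduler --date-interval 2012-06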
if __name__ == "__main__":
luigi.run()
|
apache-2.0
|
tlodge/homehub.nox
|
src/nox/webapps/webserver/dummywebpage.py
|
10
|
5957
|
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
"""Example web page served through the webserver component.
This example component demonstrates serving both dynamically and
statically generated content using the webserver component.
Dynamic content is served by subclassing the TwistedWeb Resource class
(defined in twisted.web.resource.Resource), providing implementations
for methods named "render_<HTTP Request Method>" to take any desired
actions and return appropriate content, and registering that class for
a specific URL using the install_resource method of the webserver
component. The DummyPageResource class defined below illustrates a
trivial implementation of a TwistedWeb Resource subclass serving a
very simple page in response to a GET request. The dummywebpage class
below, implementing the dummywebpage component, registers a
DummyPageResource instance in its install method.
Static content comes from a static_www subdirectory in a component's
directory. The subdirectory tree under this directory will be made
available below a URL of the form:
/static/<buildnr>/<module path>/
where:
<buildnr> is the build number given to this software version. If
a build number is not specifically provided when configure is
run, it defaults to zero. Since it changes on a build by
build basis, the safest method is to dynamically generate
this part of the path. The example page generated by the
DummyPageResource class below demonstrates this.
<module_path> is the path to the module. This module path for
this component is "nox/webapps/webserver". Other components
will have different module paths, depending on where they
reside in the NOX source tree.
Dynamic page resources should NOT be installed on paths within the
static tree.
If creating your own module using this one as a template, be sure to
look at the following related files in this component directory as
well:
meta.xml: This file records the component dependencies. Refer to
the section for the dummywebpage component to setup your own
component.
Makefile.am: If the new component is in a new subdirectory, a
Makefile.am will be required for it. Refer to the one in
this component's directory. Particularly important for
proper operation of web content is correct defintion of the
MODULE, MODULE_BASE_PATH, and MODULE_BASE_ESCAPE variables.
The dependencies on the all-local, clean-local, and
install-exec-hook targets are required, but the commands
under the dependency declaration line should NOT be copied
into the new Makefile.am.
If the proper dependencies are specified in meta.xml, NOX can be
started with the webserver serving that content by simply ensuring the
component name is included on the nox command line. In this example,
to start NOX so that it just serves the dummy page, start nox as:
nox_core dummywebpage
By default, the webserver will attempt to run on port 80 and 443,
which will fail if run as a regular user. To get an unencrypted
server to run on a non-standard port, use a command like:
nox_core webserver=port=8888 dummywebpage
"""
from nox.coreapps.pyrt.pycomponent import *
from nox.lib.core import *
from nox.webapps.webserver import webserver
from twisted.web.resource import Resource
class DummyPageResource(Resource):
def __init__(self, component):
# Using component reference to get at webserver component to
# determine the base path for static content.
self.component = component
    # If generating a lot of dynamic content, a templating system is
    # very helpful. We know of successful use of this framework with
    # the Mako templating system, but any system available and
    # familiar should work fine.
def render_GET(self, request):
return """\
<html>
<head><title>NOX Dummy Web Page</title></head>
<body>
<h1>NOX Dummy Web Page</h1>
<p>Congratulations, the NOX web server is working.</p>
<img src="%s/nox/webapps/webserver/happy_face.png" alt="Happy face image from static content.">
</body>
</html>
""" % (self.component.webserver.siteConfig["staticBase"],)
class dummywebpage(Component):
def __init__(self, ctxt):
Component.__init__(self, ctxt)
self.webserver = None
def install(self):
# Get a reference to the webserver component
self.webserver = self.resolve(str(webserver.webserver))
# tell webserver that all authentication initialization is
# done. This is a wart to allow pluggable authentication
self.webserver.authui_initialized = True
# Install a dynamically generated page
self.webserver.install_resource("/dummy.html", DummyPageResource(self))
# Set the default web server URL served when "/" is requested.
# This should only be called by one component or the final
# result will be dependent on component load order, which can
# vary.
self.webserver.default_uri = "/dummy.html"
def getInterface(self):
return str(dummywebpage)
def getFactory():
class Factory:
def instance(self, ctxt):
return dummywebpage(ctxt)
return Factory()
|
gpl-3.0
|
eduNEXT/edunext-platform
|
lms/djangoapps/ccx/migrations/0005_change_ccx_coach_to_staff.py
|
5
|
3525
|
# -*- coding: utf-8 -*-
import logging
import six
from ccx_keys.locator import CCXLocator
from django.contrib.auth.models import User
from django.db import migrations
from django.http import Http404
from lms.djangoapps.courseware.courses import get_course_by_id
from lms.djangoapps.instructor.access import allow_access, revoke_access
log = logging.getLogger("edx.ccx")
def change_existing_ccx_coaches_to_staff(apps, schema_editor):
"""
Modify all coaches of CCX courses so that they have the staff role on the
CCX course they coach, but retain the CCX Coach role on the parent course.
Arguments:
apps (Applications): Apps in edX platform.
schema_editor (SchemaEditor): For editing database schema (unused)
"""
CustomCourseForEdX = apps.get_model('ccx', 'CustomCourseForEdX')
db_alias = schema_editor.connection.alias
if not db_alias == 'default':
# This migration is not intended to run against the student_module_history database and
# will fail if it does. Ensure that it'll only run against the default database.
return
list_ccx = CustomCourseForEdX.objects.using(db_alias).all()
for ccx in list_ccx:
ccx_locator = CCXLocator.from_course_locator(ccx.course_id, six.text_type(ccx.id))
try:
course = get_course_by_id(ccx_locator)
except Http404:
log.error('Could not migrate access for CCX course: %s', six.text_type(ccx_locator))
else:
coach = User.objects.get(id=ccx.coach.id)
allow_access(course, coach, 'staff', send_email=False)
revoke_access(course, coach, 'ccx_coach', send_email=False)
log.info(
'The CCX coach of CCX %s has been switched from "CCX Coach" to "Staff".',
six.text_type(ccx_locator)
)
def revert_ccx_staff_to_coaches(apps, schema_editor):
"""
Modify all staff on CCX courses so that they no longer have the staff role
on the course that they coach.
Arguments:
apps (Applications): Apps in edX platform.
schema_editor (SchemaEditor): For editing database schema (unused)
"""
CustomCourseForEdX = apps.get_model('ccx', 'CustomCourseForEdX')
db_alias = schema_editor.connection.alias
if not db_alias == 'default':
return
list_ccx = CustomCourseForEdX.objects.using(db_alias).all()
for ccx in list_ccx:
ccx_locator = CCXLocator.from_course_locator(ccx.course_id, six.text_type(ccx.id))
try:
course = get_course_by_id(ccx_locator)
except Http404:
log.error('Could not migrate access for CCX course: %s', six.text_type(ccx_locator))
else:
coach = User.objects.get(id=ccx.coach.id)
allow_access(course, coach, 'ccx_coach', send_email=False)
revoke_access(course, coach, 'staff', send_email=False)
log.info(
'The CCX coach of CCX %s has been switched from "Staff" to "CCX Coach".',
six.text_type(ccx_locator)
)
class Migration(migrations.Migration):
dependencies = [
('ccx', '0001_initial'),
('ccx', '0002_customcourseforedx_structure_json'),
('ccx', '0003_add_master_course_staff_in_ccx'),
('ccx', '0004_seed_forum_roles_in_ccx_courses'),
]
operations = [
migrations.RunPython(
code=change_existing_ccx_coaches_to_staff,
reverse_code=revert_ccx_staff_to_coaches
)
]
|
agpl-3.0
|
pythonprobr/pypratico
|
gui/rc4.py
|
1
|
1556
|
#!/usr/bin/env python3
def rc4(chave, entrada, loops=1):
    ''' RC4-compatible algorithm '''
    kbox = list(range(256)) # create the key box
    for i, car in enumerate(chave): # copy the key into the vector
        kbox[i] = car
    j = len(chave)
    for i in range(j, 256): # repeat until filled
        kbox[i] = kbox[i-j]
    # [1] initialize sbox
    sbox = list(range(256)) # create and initialize the substitution box
    j = 0
    # repeat the sbox shuffling, as recommended in
    # CipherSaber-2: http://ciphersaber.gurus.com/faq.html#cs2
    for k in range(loops):
        for i in range(256):
            j = (j + sbox[i] + kbox[i]) % 256
            sbox[i], sbox[j] = sbox[j], sbox[i]
    # MAIN LOOP
    i = 0
    j = 0
    saida = bytearray()
    for car in entrada:
        i = (i + 1) % 256
        # [2] shuffle sbox
        j = (j + sbox[i]) % 256
        sbox[i], sbox[j] = sbox[j], sbox[i]
        # [3] compute t
        t = (sbox[i] + sbox[j]) % 256
        k = sbox[t]
        car = car ^ k
        saida.append(car)
    return saida
def _testes():
    from time import time
    claro = bytearray(b'1234567890' * 100000)
    t0 = time()
    cifrado = rc4(b'chave', claro)
    print("elapsed time: %.2f s" % (time() - t0))
    resultado = rc4(b'chave', cifrado)
    assert resultado == claro, '%r != %r' % (resultado, claro)
    print("elapsed time: %.2f s" % (time() - t0))
    print('OK')
if __name__=='__main__':
_testes()
|
gpl-2.0
|
alexpilotti/python-keystoneclient
|
keystoneclient/v3/contrib/oauth1/core.py
|
8
|
2565
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient.i18n import _
from keystoneclient.v3.contrib.oauth1 import access_tokens
from keystoneclient.v3.contrib.oauth1 import consumers
from keystoneclient.v3.contrib.oauth1 import request_tokens
def create_oauth_manager(self):
# NOTE(stevemar): Attempt to import the oauthlib package at this point.
try:
import oauthlib # noqa
# NOTE(stevemar): Return an object instead of raising an exception here,
# this will allow users to see an exception only when trying to access the
# oauth portions of client. Otherwise an exception would be raised
# when the client is created.
except ImportError:
return OAuthManagerOptionalImportProxy()
else:
return OAuthManager(self)
class OAuthManager(object):
def __init__(self, api):
self.access_tokens = access_tokens.AccessTokenManager(api)
self.consumers = consumers.ConsumerManager(api)
self.request_tokens = request_tokens.RequestTokenManager(api)
class OAuthManagerOptionalImportProxy(object):
"""Act as a proxy manager in case oauthlib is no installed.
This class will only be created if oauthlib is not in the system,
trying to access any of the attributes in name (access_tokens,
consumers, request_tokens), will result in a NotImplementedError,
and a message.
>>> manager.access_tokens.blah
NotImplementedError: To use 'access_tokens' oauthlib must be installed
Otherwise, if trying to access an attribute other than the ones in name,
the manager will state that the attribute does not exist.
>>> manager.dne.blah
AttributeError: 'OAuthManagerOptionalImportProxy' object has no
attribute 'dne'
"""
def __getattribute__(self, name):
if name in ('access_tokens', 'consumers', 'request_tokens'):
raise NotImplementedError(
_('To use %r oauthlib must be installed') % name)
return super(OAuthManagerOptionalImportProxy,
self).__getattribute__(name)
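# Illustrative sketch of how this manager is reached from a v3 client; the
# `oauth1` attribute name and the create() arguments are assumptions of this
# sketch, not guaranteed by the code above:
#   from keystoneclient.v3 import client
#   ks = client.Client(auth_url='...', token='...')
#   consumer = ks.oauth1.consumers.create(description='example consumer')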
|
apache-2.0
|
Shengliang/mbed
|
workspace_tools/host_tests/host_tests_plugins/module_copy_mps2.py
|
55
|
5528
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import os, shutil
from os.path import join
from host_test_plugins import HostTestPluginBase
from time import sleep
class HostTestPluginCopyMethod_MPS2(HostTestPluginBase):
    # MPS2 specific flashing / binary setup functions
def mps2_set_board_image_file(self, disk, images_cfg_path, image0file_path, image_name='images.txt'):
""" This function will alter image cfg file.
Main goal of this function is to change number of images to 1, comment all
existing image entries and append at the end of file new entry with test path.
@return True when all steps succeed.
"""
MBED_SDK_TEST_STAMP = 'test suite entry'
image_path = join(disk, images_cfg_path, image_name)
new_file_lines = [] # New configuration file lines (entries)
# Check each line of the image configuration file
try:
with open(image_path, 'r') as file:
for line in file:
if re.search('^TOTALIMAGES', line):
# Check number of total images, should be 1
new_file_lines.append(re.sub('^TOTALIMAGES:[\t ]*[\d]+', 'TOTALIMAGES: 1', line))
elif re.search('; - %s[\n\r]*$'% MBED_SDK_TEST_STAMP, line):
# Look for test suite entries and remove them
pass # Omit all test suite entries
elif re.search('^IMAGE[\d]+FILE', line):
                        # Comment out all existing image entries with ';'
new_file_lines.append(';' + line) # Comment non test suite lines
else:
# Append line to new file
new_file_lines.append(line)
except IOError as e:
return False
# Add new image entry with proper commented stamp
new_file_lines.append('IMAGE0FILE: %s ; - %s\r\n'% (image0file_path, MBED_SDK_TEST_STAMP))
# Write all lines to file
try:
with open(image_path, 'w') as file:
for line in new_file_lines:
                    file.write(line)
except IOError:
return False
return True
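    # Illustrative images.txt after patching (layout assumed from the rules
    # implemented above; the file names are hypothetical):
    #   TOTALIMAGES: 1
    #   ;IMAGE0FILE: \SOFTWARE\previous_image.axf
    #   IMAGE0FILE: \SOFTWARE\mbed.bin ; - test suite entry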
def mps2_select_core(self, disk, mobo_config_name=""):
""" Function selects actual core
"""
# TODO: implement core selection
pass
def mps2_switch_usb_auto_mounting_after_restart(self, disk, usb_config_name=""):
""" Function alters configuration to allow USB MSD to be mounted after restarts
"""
# TODO: implement USB MSD restart detection
pass
def copy_file(self, file, disk):
if not file:
return
_, ext = os.path.splitext(file)
ext = ext.lower()
dfile = disk + "/SOFTWARE/mbed" + ext
if os.path.isfile(dfile):
print('Remove old binary %s' % dfile)
os.remove(dfile)
shutil.copy(file, dfile)
return True
def touch_file(self, file):
""" Touch file and set timestamp to items
"""
tfile = file+'.tmp'
fhandle = open(tfile, 'a')
try:
fhandle.close()
finally:
os.rename(tfile, file)
return True
# Plugin interface
name = 'HostTestPluginCopyMethod_MPS2'
type = 'CopyMethod'
capabilities = ['mps2-copy']
required_parameters = ['image_path', 'destination_disk']
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return True
    def execute(self, capability, *args, **kwargs):
        """ Executes a capability by name.
        Each capability may directly call some command line
        program or execute a built-in Python function.
        """
result = False
        if self.check_parameters(capability, *args, **kwargs) is True:
file = kwargs['image_path']
disk = kwargs['destination_disk']
""" Add a delay in case there a test just finished
Prevents interface firmware hiccups
"""
sleep(20)
            if capability == 'mps2-copy' and self.copy_file(file, disk):
sleep(3)
if self.touch_file(disk + 'reboot.txt'):
""" Add a delay after the board was rebooted.
The actual reboot time is 20 seconds, but using 15 seconds
allows us to open the COM port and save a board reset.
This also prevents interface firmware hiccups.
"""
sleep(7)
result = True
return result
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_MPS2()
|
apache-2.0
|
kaspermunch/MultiPurpose
|
MultiPurpose/NWAlign/DPMatrix.py
|
1
|
1966
|
"""DPMatrix.py
This class models a dynamic programming matrix for use in sequence
alignment models. The design of this class is based on the description of
dynamic programming matrices in:
Durbin et al. 1998. Biological Sequence Analysis. Cambridge University
Press.
Classes:
* DPMatrix
* DPMatrixCell
"""
class DPMatrix:
"""A generic Dynmaic Programming matrix.
This is an abstract base class and should be inherited to provide a
specific implementation of an algorithm.
"""
def __init__(self, sub_matrix):
"""Initialize with a substitution matrix to use for alignments.
Arguments:
* sub_matrix - An initialized substitution matrix from the
'Substitution Matrix' class.
"""
self.sub_matrix = sub_matrix
self.dpmatrix = {}
def fill_cell(self, column, row):
pass
def fill_matrix(self, sequence1, sequence2):
"""Fill the dpmatrix via a 'pull' recursion method."""
self.seq1 = sequence1
self.seq2 = sequence2
last_cell = self.fill_cell(len(sequence1), len(sequence2))
self.dpmatrix[(len(sequence1), len(sequence2))] = last_cell
class DPMatrixCell:
"""An individual cell in a DPMatrix.
"""
def __init__(self, col_pos, row_pos, seq1item, seq2item):
self.col_pos = col_pos
self.row_pos = row_pos
self.seq1item = seq1item
self.seq2item = seq2item
self.value = None
self.parent_cell = None
def set_value(self, value):
self.value = value
def set_parent(self, parent):
self.parent_cell = parent
def get_value(self):
        if self.value is None:
raise IndexError('Value not set for this matrix cell.')
return self.value
def get_parent(self):
return self.parent_cell
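# Illustrative subclass (not part of the original module): a global-alignment
# score matrix with a linear gap penalty, filled by the same 'pull' recursion
# that fill_matrix() above initiates. It assumes sub_matrix behaves like a
# dict supporting sub_matrix[(item1, item2)] lookups; that interface is an
# assumption of this sketch, not something the classes above guarantee.
class SimpleGlobalAlignmentMatrix(DPMatrix):
    gap_penalty = -1
    def fill_cell(self, column, row):
        """Compute the cell at (column, row), memoizing results in dpmatrix."""
        if (column, row) in self.dpmatrix:
            return self.dpmatrix[(column, row)]
        seq1item = self.seq1[column - 1] if column > 0 else None
        seq2item = self.seq2[row - 1] if row > 0 else None
        cell = DPMatrixCell(column, row, seq1item, seq2item)
        if column == 0:
            cell.set_value(row * self.gap_penalty)
        elif row == 0:
            cell.set_value(column * self.gap_penalty)
        else:
            diag = self.fill_cell(column - 1, row - 1)
            up = self.fill_cell(column, row - 1)
            left = self.fill_cell(column - 1, row)
            candidates = [
                (diag.get_value() + self.sub_matrix[(seq1item, seq2item)], diag),
                (up.get_value() + self.gap_penalty, up),
                (left.get_value() + self.gap_penalty, left),
            ]
            score, parent = max(candidates, key=lambda c: c[0])
            cell.set_value(score)
            cell.set_parent(parent)
        self.dpmatrix[(column, row)] = cell
        return cell
# Example usage with hypothetical substitution scores:
#   subs = {(a, b): (1 if a == b else -1) for a in 'AG' for b in 'AG'}
#   matrix = SimpleGlobalAlignmentMatrix(subs)
#   matrix.fill_matrix('AG', 'AG')
#   matrix.dpmatrix[(2, 2)].get_value()  # -> 2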
|
gpl-2.0
|
molobrakos/home-assistant
|
homeassistant/components/emulated_roku/config_flow.py
|
10
|
1964
|
"""Config flow to configure emulated_roku component."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
from .const import CONF_LISTEN_PORT, DEFAULT_NAME, DEFAULT_PORT, DOMAIN
@callback
def configured_servers(hass):
"""Return a set of the configured servers."""
return set(entry.data[CONF_NAME] for entry
in hass.config_entries.async_entries(DOMAIN))
@config_entries.HANDLERS.register(DOMAIN)
class EmulatedRokuFlowHandler(config_entries.ConfigFlow):
"""Handle an emulated_roku config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
name = user_input[CONF_NAME]
if name in configured_servers(self.hass):
return self.async_abort(reason='name_exists')
return self.async_create_entry(
title=name,
data=user_input
)
servers_num = len(configured_servers(self.hass))
if servers_num:
default_name = "{} {}".format(DEFAULT_NAME, servers_num + 1)
default_port = DEFAULT_PORT + servers_num
else:
default_name = DEFAULT_NAME
default_port = DEFAULT_PORT
return self.async_show_form(
step_id='user',
data_schema=vol.Schema({
vol.Required(CONF_NAME,
default=default_name): str,
vol.Required(CONF_LISTEN_PORT,
default=default_port): vol.Coerce(int)
}),
errors=errors
)
async def async_step_import(self, import_config):
"""Handle a flow import."""
return await self.async_step_user(import_config)
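# Illustrative configuration.yaml that would arrive via async_step_import; the
# key names are assumptions based on the constants imported above:
#   emulated_roku:
#     servers:
#       - name: Emulated Roku 1
#         listen_port: 8060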
|
apache-2.0
|
heidsoft/VirtualBox
|
src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/PatchPcdValue/PatchPcdValue.py
|
11
|
10521
|
## @file
# Patch value into the binary file.
#
# Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import os
import sys
import re
from optparse import OptionParser
from optparse import make_option
from Common.BuildToolError import *
import Common.EdkLogger as EdkLogger
from Common.BuildVersion import gBUILD_VERSION
import array
# Version and Copyright
__version_number__ = ("0.10" + " " + gBUILD_VERSION)
__version__ = "%prog Version " + __version_number__
__copyright__ = "Copyright (c) 2010, Intel Corporation. All rights reserved."
## PatchBinaryFile method
#
# This method mainly patches the data into binary file.
#
# @param FileName File path of the binary file
# @param ValueOffset Offset value
# @param TypeName DataType Name
# @param Value Value String
# @param MaxSize MaxSize value
#
# @retval 0 File is updated successfully.
# @retval not 0 File is updated failed.
#
def PatchBinaryFile(FileName, ValueOffset, TypeName, ValueString, MaxSize=0):
#
# Length of Binary File
#
FileHandle = open (FileName, 'rb')
FileHandle.seek (0, 2)
FileLength = FileHandle.tell()
FileHandle.close()
#
# Unify string to upper string
#
TypeName = TypeName.upper()
#
# Get PCD value data length
#
ValueLength = 0
if TypeName == 'BOOLEAN':
ValueLength = 1
elif TypeName == 'UINT8':
ValueLength = 1
elif TypeName == 'UINT16':
ValueLength = 2
elif TypeName == 'UINT32':
ValueLength = 4
elif TypeName == 'UINT64':
ValueLength = 8
elif TypeName == 'VOID*':
if MaxSize == 0:
return OPTION_MISSING, "PcdMaxSize is not specified for VOID* type PCD."
ValueLength = MaxSize
else:
return PARAMETER_INVALID, "PCD type %s is not valid." %(CommandOptions.PcdTypeName)
#
# Check PcdValue is in the input binary file.
#
if ValueOffset + ValueLength > FileLength:
return PARAMETER_INVALID, "PcdOffset + PcdMaxSize(DataType) is larger than the input file size."
#
# Read binary file into array
#
FileHandle = open (FileName, 'rb')
ByteArray = array.array('B')
ByteArray.fromfile(FileHandle, FileLength)
FileHandle.close()
OrigByteList = ByteArray.tolist()
ByteList = ByteArray.tolist()
#
# Clear the data in file
#
for Index in range(ValueLength):
ByteList[ValueOffset + Index] = 0
#
# Patch value into offset
#
ValueString = ValueString.upper()
ValueNumber = 0
if TypeName == 'BOOLEAN':
#
# Get PCD value for BOOLEAN data type
#
try:
if ValueString == 'TRUE':
ValueNumber = 1
elif ValueString == 'FALSE':
ValueNumber = 0
            elif ValueString.startswith('0X'):
                ValueNumber = int (ValueString, 16)
            else:
                ValueNumber = int (ValueString)
if ValueNumber != 0:
ValueNumber = 1
except:
return PARAMETER_INVALID, "PCD Value %s is not valid dec or hex string." %(ValueString)
#
# Set PCD value into binary data
#
ByteList[ValueOffset] = ValueNumber
elif TypeName in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
#
# Get PCD value for UINT* data type
#
try:
if ValueString.startswith('0X'):
ValueNumber = int (ValueString, 16)
else:
ValueNumber = int (ValueString)
except:
return PARAMETER_INVALID, "PCD Value %s is not valid dec or hex string." %(ValueString)
#
# Set PCD value into binary data
#
for Index in range(ValueLength):
ByteList[ValueOffset + Index] = ValueNumber % 0x100
ValueNumber = ValueNumber / 0x100
elif TypeName == 'VOID*':
if ValueString.startswith("L "):
#
# Patch Unicode String
#
Index = 0
for ByteString in ValueString[2:]:
#
# Reserve zero as unicode tail
#
if Index + 2 >= ValueLength:
break
#
# Set string value one by one
#
ByteList[ValueOffset + Index] = ord(ByteString)
Index = Index + 2
elif ValueString.startswith("{") and ValueString.endswith("}"):
#
# Patch {0x1, 0x2, ...} byte by byte
#
ValueList = ValueString[1 : len(ValueString) - 1].split(', ')
Index = 0
try:
for ByteString in ValueList:
if ByteString.upper().startswith('0X'):
ByteValue = int(ByteString, 16)
else:
ByteValue = int(ByteString)
ByteList[ValueOffset + Index] = ByteValue % 0x100
Index = Index + 1
if Index >= ValueLength:
break
except:
return PARAMETER_INVALID, "PCD Value %s is not valid dec or hex string array." %(ValueString)
else:
#
# Patch ascii string
#
Index = 0
for ByteString in ValueString:
#
# Reserve zero as string tail
#
if Index + 1 >= ValueLength:
break
#
# Set string value one by one
#
ByteList[ValueOffset + Index] = ord(ByteString)
Index = Index + 1
#
# Update new data into input file.
#
if ByteList != OrigByteList:
ByteArray = array.array('B')
ByteArray.fromlist(ByteList)
FileHandle = open (FileName, 'wb')
ByteArray.tofile(FileHandle)
FileHandle.close()
return 0, "Patch Value into File %s successfully." %(FileName)
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Options A optparse.Values object containing the parsed options
# @retval InputFile Path of file to be trimmed
#
def Options():
OptionList = [
make_option("-f", "--offset", dest="PcdOffset", action="store", type="int",
help="Start offset to the image is used to store PCD value."),
make_option("-u", "--value", dest="PcdValue", action="store",
help="PCD value will be updated into the image."),
make_option("-t", "--type", dest="PcdTypeName", action="store",
help="The name of PCD data type may be one of VOID*,BOOLEAN, UINT8, UINT16, UINT32, UINT64."),
make_option("-s", "--maxsize", dest="PcdMaxSize", action="store", type="int",
help="Max size of data buffer is taken by PCD value.It must be set when PCD type is VOID*."),
make_option("-v", "--verbose", dest="LogLevel", action="store_const", const=EdkLogger.VERBOSE,
help="Run verbosely"),
make_option("-d", "--debug", dest="LogLevel", type="int",
help="Run with debug information"),
make_option("-q", "--quiet", dest="LogLevel", action="store_const", const=EdkLogger.QUIET,
help="Run quietly"),
make_option("-?", action="help", help="show this help message and exit"),
]
# use clearer usage to override default usage message
UsageString = "%prog -f Offset -u Value -t Type [-s MaxSize] <input_file>"
Parser = OptionParser(description=__copyright__, version=__version__, option_list=OptionList, usage=UsageString)
Parser.set_defaults(LogLevel=EdkLogger.INFO)
Options, Args = Parser.parse_args()
# error check
if len(Args) == 0:
EdkLogger.error("PatchPcdValue", PARAMETER_INVALID, ExtraData=Parser.get_usage())
InputFile = Args[len(Args) - 1]
return Options, InputFile
## Entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
try:
#
# Check input parameter
#
EdkLogger.Initialize()
CommandOptions, InputFile = Options()
if CommandOptions.LogLevel < EdkLogger.DEBUG_9:
EdkLogger.SetLevel(CommandOptions.LogLevel + 1)
else:
EdkLogger.SetLevel(CommandOptions.LogLevel)
if not os.path.exists (InputFile):
EdkLogger.error("PatchPcdValue", FILE_NOT_FOUND, ExtraData=InputFile)
return 1
        if CommandOptions.PcdOffset is None or CommandOptions.PcdValue is None or CommandOptions.PcdTypeName is None:
            EdkLogger.error("PatchPcdValue", OPTION_MISSING, ExtraData="PcdOffset, PcdValue or PcdTypeName is not specified.")
return 1
if CommandOptions.PcdTypeName.upper() not in ["BOOLEAN", "UINT8", "UINT16", "UINT32", "UINT64", "VOID*"]:
EdkLogger.error("PatchPcdValue", PARAMETER_INVALID, ExtraData="PCD type %s is not valid." %(CommandOptions.PcdTypeName))
return 1
if CommandOptions.PcdTypeName.upper() == "VOID*" and CommandOptions.PcdMaxSize == None:
EdkLogger.error("PatchPcdValue", OPTION_MISSING, ExtraData="PcdMaxSize is not specified for VOID* type PCD.")
return 1
#
# Patch value into binary image.
#
ReturnValue, ErrorInfo = PatchBinaryFile (InputFile, CommandOptions.PcdOffset, CommandOptions.PcdTypeName, CommandOptions.PcdValue, CommandOptions.PcdMaxSize)
if ReturnValue != 0:
EdkLogger.error("PatchPcdValue", ReturnValue, ExtraData=ErrorInfo)
return 1
return 0
except:
return 1
if __name__ == '__main__':
r = Main()
sys.exit(r)
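## Illustrative invocation (a sketch; the file name and values are hypothetical):
#   python PatchPcdValue.py -f 256 -u 0x00010000 -t UINT32 BiosUpdate.fd
# This patches the four bytes at offset 256 of BiosUpdate.fd with 0x00010000.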
|
gpl-2.0
|
Bachaco-ve/odoo
|
addons/auth_crypt/auth_crypt.py
|
179
|
4021
|
import logging
from passlib.context import CryptContext
import openerp
from openerp.osv import fields, osv
from openerp.addons.base.res import res_users
res_users.USER_PRIVATE_FIELDS.append('password_crypt')
_logger = logging.getLogger(__name__)
default_crypt_context = CryptContext(
# kdf which can be verified by the context. The default encryption kdf is
# the first of the list
['pbkdf2_sha512', 'md5_crypt'],
# deprecated algorithms are still verified as usual, but ``needs_update``
# will indicate that the stored hash should be replaced by a more recent
# algorithm. Passlib 1.6 supports an `auto` value which deprecates any
# algorithm but the default, but Debian only provides 1.5 so...
deprecated=['md5_crypt'],
)
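# Illustrative sketch (not part of this module) of the verify-and-update flow
# described in the comments above, assuming passlib is available:
#   ctx = CryptContext(['pbkdf2_sha512', 'md5_crypt'], deprecated=['md5_crypt'])
#   stored = ctx.encrypt('secret')            # hashed with pbkdf2_sha512
#   ok, new_hash = ctx.verify_and_update('secret', stored)
#   # new_hash is None unless `stored` used a deprecated algorithm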
class res_users(osv.osv):
_inherit = "res.users"
def init(self, cr):
_logger.info("Hashing passwords, may be slow for databases with many users...")
cr.execute("SELECT id, password FROM res_users"
" WHERE password IS NOT NULL"
" AND password != ''")
for uid, pwd in cr.fetchall():
self._set_password(cr, openerp.SUPERUSER_ID, uid, pwd)
def set_pw(self, cr, uid, id, name, value, args, context):
if value:
self._set_password(cr, uid, id, value, context=context)
self.invalidate_cache(cr, uid, context=context)
    def get_pw(self, cr, uid, ids, name, args, context):
cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
return dict(cr.fetchall())
_columns = {
'password': fields.function(get_pw, fnct_inv=set_pw, type='char', string='Password', invisible=True, store=True),
'password_crypt': fields.char(string='Encrypted Password', invisible=True, copy=False),
}
def check_credentials(self, cr, uid, password):
# convert to base_crypt if needed
cr.execute('SELECT password, password_crypt FROM res_users WHERE id=%s AND active', (uid,))
encrypted = None
if cr.rowcount:
stored, encrypted = cr.fetchone()
if stored and not encrypted:
self._set_password(cr, uid, uid, stored)
self.invalidate_cache(cr, uid)
try:
return super(res_users, self).check_credentials(cr, uid, password)
except openerp.exceptions.AccessDenied:
if encrypted:
valid_pass, replacement = self._crypt_context(cr, uid, uid)\
.verify_and_update(password, encrypted)
if replacement is not None:
self._set_encrypted_password(cr, uid, uid, replacement)
if valid_pass:
return
raise
def _set_password(self, cr, uid, id, password, context=None):
""" Encrypts then stores the provided plaintext password for the user
``id``
"""
encrypted = self._crypt_context(cr, uid, id, context=context).encrypt(password)
self._set_encrypted_password(cr, uid, id, encrypted, context=context)
def _set_encrypted_password(self, cr, uid, id, encrypted, context=None):
""" Store the provided encrypted password to the database, and clears
any plaintext password
:param uid: id of the current user
:param id: id of the user on which the password should be set
"""
cr.execute(
"UPDATE res_users SET password='', password_crypt=%s WHERE id=%s",
(encrypted, id))
def _crypt_context(self, cr, uid, id, context=None):
""" Passlib CryptContext instance used to encrypt and verify
passwords. Can be overridden if technical, legal or political matters
require different kdfs than the provided default.
Requires a CryptContext as deprecation and upgrade notices are used
internally
"""
return default_crypt_context
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
xuxiao19910803/edx
|
lms/djangoapps/mobile_api/video_outlines/views.py
|
121
|
4888
|
"""
Video Outlines
We only provide the listing view for a video outline, and video outlines are
only displayed at the course level. This is because it makes it a lot easier to
optimize and reason about, and it avoids having to tackle the bigger problem of
general XBlock representation in this rather specialized formatting.
"""
from functools import partial
from django.http import Http404, HttpResponse
from mobile_api.models import MobileApiConfig
from rest_framework import generics
from rest_framework.response import Response
from opaque_keys.edx.locator import BlockUsageLocator
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from ..utils import mobile_view, mobile_course_access
from .serializers import BlockOutline, video_summary
@mobile_view()
class VideoSummaryList(generics.ListAPIView):
"""
**Use Case**
Get a list of all videos in the specified course. You can use the
video_url value to access the video file.
**Example Request**
GET /api/mobile/v0.5/video_outlines/courses/{organization}/{course_number}/{course_run}
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with an array of videos in the course. The array
includes the following information for each video.
* named_path: An array that consists of the display names of the
courseware objects in the path to the video.
* path: An array that specifies the complete path to the video in
the courseware hierarchy. The array contains the following
values.
* category: The type of division in the course outline.
Possible values are "chapter", "sequential", and "vertical".
* name: The display name for the object.
              * id: The unique identifier for the video.
* section_url: The URL to the first page of the section that
contains the video in the Learning Management System.
* summary: An array of data about the video that includes the
following values.
* category: The type of component. This value will always be "video".
* duration: The length of the video, if available.
* id: The unique identifier for the video.
* language: The language code for the video.
* name: The display name of the video.
* size: The size of the video file.
* transcripts: An array of language codes and URLs to available
video transcripts. Use the URL value to access a transcript
for the video.
* video_thumbnail_url: The URL to the thumbnail image for the
video, if available.
* video_url: The URL to the video file. Use this value to access
the video.
* unit_url: The URL to the unit that contains the video in the Learning
Management System.
"""
@mobile_course_access(depth=None)
def list(self, request, course, *args, **kwargs):
video_profiles = MobileApiConfig.get_video_profiles()
video_outline = list(
BlockOutline(
course.id,
course,
{"video": partial(video_summary, video_profiles)},
request,
video_profiles,
)
)
return Response(video_outline)
@mobile_view()
class VideoTranscripts(generics.RetrieveAPIView):
"""
**Use Case**
Get a transcript for a specified video and language.
**Example request**
GET /api/mobile/v0.5/video_outlines/transcripts/{organization}/{course_number}/{course_run}/{video ID}/{language code}
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with an .srt file that you can download.
"""
@mobile_course_access()
def get(self, request, course, *args, **kwargs):
block_id = kwargs['block_id']
lang = kwargs['lang']
usage_key = BlockUsageLocator(
course.id, block_type="video", block_id=block_id
)
try:
video_descriptor = modulestore().get_item(usage_key)
transcripts = video_descriptor.get_transcripts_info()
content, filename, mimetype = video_descriptor.get_transcript(transcripts, lang=lang)
except (NotFoundError, ValueError, KeyError):
raise Http404(u"Transcript not found for {}, lang: {}".format(block_id, lang))
response = HttpResponse(content, content_type=mimetype)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename.encode('utf-8'))
return response
|
agpl-3.0
|